code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__snake_case : Tuple = logging.get_logger(__name__)
__snake_case : Any = {
'microsoft/conditional-detr-resnet-50': (
'https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'
),
}
class UpperCamelCase ( a ):
"""simple docstring"""
_lowerCamelCase : Dict ='''conditional_detr'''
_lowerCamelCase : str =['''past_key_values''']
_lowerCamelCase : Optional[Any] ={
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Union[str, Any] , _lowerCamelCase : Dict=True , _lowerCamelCase : str=None , _lowerCamelCase : Any=3 , _lowerCamelCase : List[Any]=3_0_0 , _lowerCamelCase : Optional[int]=6 , _lowerCamelCase : List[str]=2_0_4_8 , _lowerCamelCase : str=8 , _lowerCamelCase : Any=6 , _lowerCamelCase : List[Any]=2_0_4_8 , _lowerCamelCase : int=8 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : Dict=True , _lowerCamelCase : Optional[int]="relu" , _lowerCamelCase : Tuple=2_5_6 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : List[str]=0.0 , _lowerCamelCase : str=0.0 , _lowerCamelCase : Union[str, Any]=0.02 , _lowerCamelCase : Optional[Any]=1.0 , _lowerCamelCase : Tuple=False , _lowerCamelCase : Tuple="sine" , _lowerCamelCase : List[Any]="resnet50" , _lowerCamelCase : Tuple=True , _lowerCamelCase : Any=False , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : str=5 , _lowerCamelCase : Optional[int]=2 , _lowerCamelCase : int=1 , _lowerCamelCase : Dict=1 , _lowerCamelCase : Tuple=2 , _lowerCamelCase : List[Any]=5 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : Any=0.25 , **_lowerCamelCase : str , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A__ = CONFIG_MAPPING['resnet'](out_features=['''stage4'''] )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
A__ = backbone_config.get('''model_type''' )
A__ = CONFIG_MAPPING[backbone_model_type]
A__ = config_class.from_dict(SCREAMING_SNAKE_CASE__ )
A__ = use_timm_backbone
A__ = backbone_config
A__ = num_channels
A__ = num_queries
A__ = d_model
A__ = encoder_ffn_dim
A__ = encoder_layers
A__ = encoder_attention_heads
A__ = decoder_ffn_dim
A__ = decoder_layers
A__ = decoder_attention_heads
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = activation_function
A__ = init_std
A__ = init_xavier_std
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = encoder_layers
A__ = auxiliary_loss
A__ = position_embedding_type
A__ = backbone
A__ = use_pretrained_backbone
A__ = dilation
# Hungarian matcher
A__ = class_cost
A__ = bbox_cost
A__ = giou_cost
# Loss coefficients
A__ = mask_loss_coefficient
A__ = dice_loss_coefficient
A__ = cls_loss_coefficient
A__ = bbox_loss_coefficient
A__ = giou_loss_coefficient
A__ = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def A__ ( self : str ):
return self.encoder_attention_heads
@property
def A__ ( self : int ):
return self.d_model
def A__ ( self : Union[str, Any] ):
A__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
A__ = self.backbone_config.to_dict()
A__ = self.__class__.model_type
return output
class UpperCamelCase ( a ):
"""simple docstring"""
_lowerCamelCase : List[Any] =version.parse("1.11" )
@property
def A__ ( self : Any ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self : Optional[Any] ):
return 1E-5
@property
def A__ ( self : Optional[Any] ):
return 1_2
| 571 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ={"""vocab_file""": """vocab.txt"""}
a ={
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
a ={
"""openbmb/cpm-ant-10b""": 1024,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : int = collections.OrderedDict()
with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader:
__lowerCamelCase : Optional[int] = reader.readlines()
for index, token in enumerate(lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = token.rstrip('\n' )
__lowerCamelCase : Union[str, Any] = index
return vocab
class A_ ( SCREAMING_SNAKE_CASE ):
def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="<unk>" ,SCREAMING_SNAKE_CASE__ : Optional[int]=2_0_0):
__lowerCamelCase : str = vocab
__lowerCamelCase : Dict = unk_token
__lowerCamelCase : int = max_input_chars_per_word
def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : int = list(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > self.max_input_chars_per_word:
return [self.unk_token]
__lowerCamelCase : Tuple = 0
__lowerCamelCase : str = []
while start < len(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : List[Any] = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = None
while start < end:
__lowerCamelCase : Any = ''.join(chars[start:end])
if substr in self.vocab:
__lowerCamelCase : Optional[Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token)
start += 1
else:
sub_tokens.append(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = end
return sub_tokens
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
_UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
_UpperCAmelCase : Optional[int] = False
def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
requires_backends(self ,['jieba'])
super().__init__(
bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Optional[Any] = bod_token
__lowerCamelCase : Dict = eod_token
__lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = self.encoder[space_token]
__lowerCamelCase : Dict = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
__lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1]))
__lowerCamelCase : int = {v: k for k, v in self.encoder.items()}
__lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token)
@property
def lowerCAmelCase ( self : List[Any]):
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : Tuple):
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : Union[str, Any]):
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : str):
return len(self.encoder)
def lowerCAmelCase ( self : str):
return dict(self.encoder ,**self.added_tokens_encoder)
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Any = []
for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__))
return output_tokens
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Tuple = [i for i in token_ids if i >= 0]
__lowerCamelCase : str = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]):
return token in self.encoder
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]):
return "".join(SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]):
return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token))
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token)
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
if os.path.isdir(SCREAMING_SNAKE_CASE__):
__lowerCamelCase : Any = os.path.join(
SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
else:
__lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
__lowerCamelCase : Any = 0
if " " in self.encoder:
__lowerCamelCase : Any = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
__lowerCamelCase : str = self.encoder['\n']
del self.encoder["\n"]
__lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1]))
with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!')
__lowerCamelCase : Any = token_index
writer.write(token + '\n')
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is not None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
| 652 | 0 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
__lowerCAmelCase : List[str] ={
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = '''git_vision_model'''
def __init__( self :List[str] , lowercase_ :Optional[Any]=7_68 , lowercase_ :str=30_72 , lowercase_ :int=12 , lowercase_ :Dict=12 , lowercase_ :Dict=3 , lowercase_ :Dict=2_24 , lowercase_ :Dict=16 , lowercase_ :List[str]="quick_gelu" , lowercase_ :int=1E-5 , lowercase_ :Union[str, Any]=0.0 , lowercase_ :int=0.0_2 , **lowercase_ :Union[str, Any] , )-> Optional[Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A__ = hidden_size
A__ = intermediate_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def UpperCAmelCase_ ( cls :Optional[Any] , lowercase_ :Union[str, os.PathLike] , **lowercase_ :List[Any] )-> List[str]:
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
A__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from GITConfig
if config_dict.get("model_type" ) == "git":
A__ = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase ( UpperCamelCase__ ):
__lowercase = '''git'''
def __init__( self :Optional[int] , lowercase_ :int=None , lowercase_ :Union[str, Any]=3_05_22 , lowercase_ :List[str]=7_68 , lowercase_ :List[str]=6 , lowercase_ :Union[str, Any]=12 , lowercase_ :Dict=30_72 , lowercase_ :int="gelu" , lowercase_ :Any=0.1 , lowercase_ :Any=0.1 , lowercase_ :Any=10_24 , lowercase_ :Optional[int]=0.0_2 , lowercase_ :Tuple=1E-12 , lowercase_ :Union[str, Any]=0 , lowercase_ :str="absolute" , lowercase_ :int=True , lowercase_ :Optional[int]=False , lowercase_ :str=1_01 , lowercase_ :Tuple=1_02 , lowercase_ :Dict=None , **lowercase_ :str , )-> str:
super().__init__(bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if vision_config is None:
A__ = {}
logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
A__ = GitVisionConfig(**SCREAMING_SNAKE_CASE__ )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = tie_word_embeddings
A__ = num_image_with_embedding
A__ = bos_token_id
A__ = eos_token_id
def UpperCAmelCase_ ( self :Union[str, Any] )-> Tuple:
A__ = copy.deepcopy(self.__dict__ )
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 440 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a ={"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__UpperCAmelCase = False
@skip_mps
class SCREAMING_SNAKE_CASE ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] =StableDiffusionAttendAndExcitePipeline
lowerCamelCase : Optional[Any] =False
lowerCamelCase : List[str] =TEXT_TO_IMAGE_PARAMS
lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"} )
lowerCamelCase : int =TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase : Optional[int] =TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple ) -> List[str]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
__lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE__ , )
__lowerCAmelCase : str = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
__lowerCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
__lowerCAmelCase : List[Any] = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase : Any = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__lowerCAmelCase : str = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any]=0 ) -> List[Any]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith("""mps""" ):
__lowerCAmelCase : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__lowerCAmelCase : Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase : Dict = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Dict = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase : Optional[int] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase : Any = pipe(**SCREAMING_SNAKE_CASE__ ).images
__lowerCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
__lowerCAmelCase : Optional[Any] = np.array(
[0.6390_5364, 0.6289_7307, 0.4859_9017, 0.513_3624, 0.555_0048, 0.4576_9516, 0.5032_6973, 0.502_3139, 0.4538_4496] )
__lowerCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
"""simple docstring"""
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
"""simple docstring"""
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
"""simple docstring"""
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
"""simple docstring"""
super().test_save_load_local(expected_max_difference=5e-4 )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int ) -> Optional[int]:
"""simple docstring"""
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]:
"""simple docstring"""
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = torch.manual_seed(51 )
__lowerCAmelCase : Union[str, Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
__lowerCAmelCase : int = 'a painting of an elephant with glasses'
__lowerCAmelCase : Tuple = [5, 7]
__lowerCAmelCase : Union[str, Any] = pipe(
prompt=SCREAMING_SNAKE_CASE__ , token_indices=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
__lowerCAmelCase : Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
| 651 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = (UnCLIPScheduler,)
def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
__lowerCamelCase : Any = {
'num_train_timesteps': 1_0_0_0,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**SCREAMING_SNAKE_CASE__)
return config
def lowerCAmelCase ( self : Optional[Any]):
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[Any]):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
for clip_sample_range in [1, 5, 1_0, 2_0]:
self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Dict):
for time_step in [0, 5_0_0, 9_9_9]:
for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[int] = self.scheduler_classes[0]
__lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log')
__lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__)
assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5
def lowerCAmelCase ( self : Any):
__lowerCamelCase : Dict = self.scheduler_classes[0]
__lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range')
__lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = 0.5
assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5
assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5
assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : str = self.scheduler_classes[0]
__lowerCamelCase : str = self.get_scheduler_config()
__lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = scheduler.timesteps
__lowerCamelCase : Union[str, Any] = self.dummy_model()
__lowerCamelCase : Optional[Any] = self.dummy_sample_deter
__lowerCamelCase : List[str] = torch.manual_seed(0)
for i, t in enumerate(SCREAMING_SNAKE_CASE__):
# 1. predict noise residual
__lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
__lowerCamelCase : Optional[Any] = pred_prev_sample
__lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
assert abs(result_sum.item() - 252.2682495) < 1E-2
assert abs(result_mean.item() - 0.3284743) < 1E-3
def lowerCAmelCase ( self : str):
__lowerCamelCase : str = self.scheduler_classes[0]
__lowerCamelCase : List[Any] = self.get_scheduler_config()
__lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
scheduler.set_timesteps(2_5)
__lowerCamelCase : int = scheduler.timesteps
__lowerCamelCase : Tuple = self.dummy_model()
__lowerCamelCase : Any = self.dummy_sample_deter
__lowerCamelCase : Any = torch.manual_seed(0)
for i, t in enumerate(SCREAMING_SNAKE_CASE__):
# 1. predict noise residual
__lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
if i + 1 == timesteps.shape[0]:
__lowerCamelCase : Optional[Any] = None
else:
__lowerCamelCase : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__lowerCamelCase : int = scheduler.step(
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
__lowerCamelCase : Union[str, Any] = pred_prev_sample
__lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
assert abs(result_sum.item() - 258.2044983) < 1E-2
assert abs(result_mean.item() - 0.3362038) < 1E-3
def lowerCAmelCase ( self : List[Any]):
pass
def lowerCAmelCase ( self : Union[str, Any]):
pass
| 652 | 0 |
'''simple docstring'''
import qiskit
def SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple) -> qiskit.result.counts.Counts:
'''simple docstring'''
_lowercase : List[str] = qiskit.Aer.get_backend('aer_simulator')
# Create a Quantum Circuit acting on the q register
_lowercase : int = qiskit.QuantumCircuit(lowerCamelCase__ , lowerCamelCase__)
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0])
# Execute the circuit on the simulator
_lowercase : str = qiskit.execute(lowerCamelCase__ , lowerCamelCase__ , shots=10_00)
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowerCamelCase__)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""") | 125 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[int] = '''swin2sr'''
_UpperCAmelCase : Any = {
'''hidden_size''': '''embed_dim''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=6_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=1 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Tuple=1_8_0 ,SCREAMING_SNAKE_CASE__ : Any=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : int=[6, 6, 6, 6, 6, 6] ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2.0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[str]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=1E-5 ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Tuple=1.0 ,SCREAMING_SNAKE_CASE__ : int="1conv" ,SCREAMING_SNAKE_CASE__ : Optional[int]="pixelshuffle" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
super().__init__(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = image_size
__lowerCamelCase : str = patch_size
__lowerCamelCase : List[Any] = num_channels
__lowerCamelCase : Dict = embed_dim
__lowerCamelCase : Dict = depths
__lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = num_heads
__lowerCamelCase : Tuple = window_size
__lowerCamelCase : Dict = mlp_ratio
__lowerCamelCase : str = qkv_bias
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = drop_path_rate
__lowerCamelCase : Optional[int] = hidden_act
__lowerCamelCase : Dict = use_absolute_embeddings
__lowerCamelCase : Optional[Any] = layer_norm_eps
__lowerCamelCase : str = initializer_range
__lowerCamelCase : List[Any] = upscale
__lowerCamelCase : List[Any] = img_range
__lowerCamelCase : List[str] = resi_connection
__lowerCamelCase : Union[str, Any] = upsampler
| 652 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the Reformer model: heavy submodules are only
# imported on first attribute access (or eagerly for static type checkers).
_import_structure = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}

# The slow tokenizer needs the sentencepiece library.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer"] = ["ReformerTokenizer"]

# The fast tokenizer needs the `tokenizers` library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_reformer_fast"] = ["ReformerTokenizerFast"]

# Modeling code needs PyTorch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_reformer"] = [
        "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ReformerAttention",
        "ReformerForMaskedLM",
        "ReformerForQuestionAnswering",
        "ReformerForSequenceClassification",
        "ReformerLayer",
        "ReformerModel",
        "ReformerModelWithLMHead",
        "ReformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so the optional heavy
    # dependencies above are only imported when actually used.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt") -> Dict:
    """Tokenize a single text line, padded/truncated to ``max_length``.

    Args:
        tokenizer: a HuggingFace tokenizer (BART tokenizers get `add_prefix_space`).
        line: raw text to encode.
        max_length: maximum sequence length.
        padding_side: "left" or "right"; applied to the tokenizer before encoding.
        pad_to_max_length: pad to ``max_length`` when True, no padding otherwise.
        return_tensors: framework of the returned tensors (default "pt").

    Returns:
        The tokenizer's encoding dict (input_ids, attention_mask, ...).
    """
    # BART needs `add_prefix_space` when the line does not already start with a space.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns of `input_ids` (and `attention_mask`) that contain only `pad_token_id`."""
    # Keep a column as soon as any row holds a non-pad token in it.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A_(Dataset):
    """Seq2seq dataset backed by parallel `<type_path>.source` / `<type_path>.target` text files.

    Each item is a dict of `input_ids`, `attention_mask` and `decoder_input_ids`
    tensors produced by `encode_line`. Lines are read lazily via `linecache`.
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally restrict the dataset to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right. RAG wraps two tokenizers: the
        # question encoder for sources and the generator for targets.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        # Character length of every line; drives __len__ and the empty-line sanity check.
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        """Stack a list of examples into a batch and trim all-pad columns."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
a =getLogger(__name__)
def flatten_list(summary_ids: List[List]) -> List:
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    """Save git information (commit, branch, hostname) to `folder_path/git_log.json`."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize `content` as JSON to `path` (pretty-printed by default)."""
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Load and return the JSON document stored at `path`."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return a dict describing the current git repo (id, commit sha, branch) and host."""
    # search_parent_directories lets this work from any subdirectory of the repo.
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    """Eager map: like `map(f, x)` but returns a list."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle `obj` to the file at `path`."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s: str) -> str:
    """Lower text and remove punctuation, articles and extra whitespace (SQuAD-style)."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction: str, ground_truth: str):
    """Token-level F1 between a prediction and a reference, after normalization."""
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection counts each shared token at most min(count) times.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction: str, ground_truth: str) -> bool:
    """True when prediction and reference are identical after normalization."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    """Average exact-match score over paired output/reference lines."""
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix: str) -> bool:
    """True when the model-type prefix denotes a RAG model (e.g. "rag_sequence")."""
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    """Move each truthy attribute in `extra_params` from `hparams` onto `config`.

    Attributes the config does not support (under either the given name or a
    known equivalent) are logged and dropped from `hparams`.
    Returns the (mutated) `hparams` and `config`.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            # Prefer the exact name; fall back to the config's equivalent name.
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for MobileBERT (slow and fast tokenizers).

    MobileBERT reuses the BERT tokenizer, so this mirrors the BERT tokenization
    test-suite against the `google/mobilebert-uncased` checkpoint.
    """

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        # Tiny vocabulary sufficient for the wordpiece tests below.
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Lower-casing strips accents by default.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 101 = [CLS], 102 = [SEP] in the BERT/MobileBERT vocabulary.
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 341 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _build_tf_to_pytorch_map(model, config, tf_weights=None):
    """Build a map of TensorFlow variable-name prefixes to PyTorch parameters.

    Mirrors the original TF MobilenetV1 checkpoint layout: one stem conv,
    13 depthwise/pointwise pairs, and (for classification models) the logits conv.
    """
    tf_to_pt_map = {}

    if isinstance(model, MobileNetVaForImageClassification):
        backbone = model.mobilenet_va
    else:
        backbone = model

    prefix = "MobilenetV1/Conv2d_0/"
    tf_to_pt_map[prefix + "weights"] = backbone.conv_stem.convolution.weight
    tf_to_pt_map[prefix + "BatchNorm/beta"] = backbone.conv_stem.normalization.bias
    tf_to_pt_map[prefix + "BatchNorm/gamma"] = backbone.conv_stem.normalization.weight
    tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = backbone.conv_stem.normalization.running_mean
    tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = backbone.conv_stem.normalization.running_var

    for i in range(13):
        tf_index = i + 1
        pt_index = i * 2  # each TF block maps to a (depthwise, pointwise) layer pair

        pointer = backbone.layer[pt_index]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        tf_to_pt_map[prefix + "depthwise_weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

        pointer = backbone.layer[pt_index + 1]
        prefix = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        tf_to_pt_map[prefix + "weights"] = pointer.convolution.weight
        tf_to_pt_map[prefix + "BatchNorm/beta"] = pointer.normalization.bias
        tf_to_pt_map[prefix + "BatchNorm/gamma"] = pointer.normalization.weight
        tf_to_pt_map[prefix + "BatchNorm/moving_mean"] = pointer.normalization.running_mean
        tf_to_pt_map[prefix + "BatchNorm/moving_variance"] = pointer.normalization.running_var

    if isinstance(model, MobileNetVaForImageClassification):
        prefix = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        tf_to_pt_map[prefix + "weights"] = model.classifier.weight
        tf_to_pt_map[prefix + "biases"] = model.classifier.bias

    return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load TensorFlow checkpoint weights at `tf_checkpoint_path` into `model`."""
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue

        array = tf_weights[name]

        if "depthwise_weights" in name:
            logger.info("Transposing depthwise")
            # TF depthwise kernels are HWIO; PyTorch expects OIHW-like ordering.
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the variable and its optimizer slots so leftovers can be reported.
        tf_weights.pop(name, None)
        tf_weights.pop(name + "/RMSProp", None)
        tf_weights.pop(name + "/RMSProp_1", None)
        tf_weights.pop(name + "/ExponentialMovingAverage", None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features: "torch.Tensor", conv_layer) -> "torch.Tensor":
    """Zero-pad `features` like TensorFlow "SAME" padding for `conv_layer`.

    TF splits the required padding asymmetrically (extra pixel goes to the
    bottom/right), which nn.Conv2d's symmetric `padding` argument cannot express.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Conv2d + optional BatchNorm + optional activation, with TF-style padding support."""

    def __init__(
        self,
        config: "MobileNetVaConfig",
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation=True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF padding the asymmetric zero-padding is applied in forward() instead.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # `use_activation` may be True (fall back to the config) or an activation name.
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: "torch.Tensor") -> "torch.Tensor":
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """Base class handling weight initialization and pretrained checkpoint loading."""

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize the weights of a single submodule."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class MobileNetVaModel(SCREAMING_SNAKE_CASE):
    """
    The bare MobileNetV1 backbone: a 3x3/stride-2 stem followed by 13 pairs of
    (depthwise 3x3, pointwise 1x1) conv layers, plus an optional average pooler.
    """

    def __init__(self, config: MobileNetVaConfig, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        # Channel width of the stem before applying the depth multiplier.
        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        # Stride pattern of the 13 depthwise blocks; every stride-2 block doubles the width.
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
            # Depthwise 3x3 conv (groups == channels), then pointwise 1x1 projection.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                ))
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                ))

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # Convolutional model: attention-head pruning is not applicable.
        raise NotImplementedError

    # NOTE(review): the forward-docstring decorator argument was left as in the
    # original file; it presumably should reference the inputs docstring constant.
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Run the backbone and return last hidden state (+ pooled output, + per-layer states)."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states
        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE):
    """MobileNetV1 backbone plus dropout + linear classifier over the pooled features."""

    def __init__(self, config: MobileNetVaConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        # Width of the last pointwise conv feeds the classifier.
        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    # NOTE(review): the forward-docstring decorator argument was left as in the
    # original file; it presumably should reference the inputs docstring constant.
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=ImageClassifierOutputWithNoAttention, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def forward(
        self,
        pixel_values: Optional[torch.Tensor] = None,
        output_hidden_states: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
        return_dict: Optional[bool] = None,
    ):
        """Classify `pixel_values`; if `labels` is given, also compute the appropriate loss."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from label dtype/cardinality if unset.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 652 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMSNModelTester:
    """Builds tiny ViTMSN configs and random inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        # f-strings were missing in the original, so the placeholders never interpolated.
        print(f'Pixel and labels shape: {pixel_values.shape}, {labels.shape}')
        print(f'Labels: {labels}')
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Common model tests for ViTMSN. Attention/hidden-state mechanics are covered by
    the shared `ModelTesterMixin`; names must be `test_*` for unittest discovery.
    """

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViTMSN does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test fixture image used by the integration test below."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the released facebook/vit-msn-small checkpoint."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        # Fixed seed: the MSN head is randomly initialized, so logits depend on it.
        torch.manual_seed(2)
        model = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 638 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """
    Rotate/warp `img` with the affine transform mapping triangle `pt1` onto `pt2`.

    The output has `cols` x `rows` pixels. (Renamed from an obfuscated identifier:
    the `__main__` block below calls this as `get_rotation`.)
    """
    matrix = cva.getAffineTransform(pt1, pt2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # (np.float32 — the original `np.floataa` is not a numpy attribute)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the point pairs were collapsed to a single name in the original;
    # this pairing follows the upstream version of the script — confirm.
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
        plt.title(titles[i])
        plt.axis("""off""")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 652 | 0 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer file (currently unused in the visible portion).
UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):
    """
    ByT5 tokenizer: operates directly on UTF-8 bytes (one token per byte), with a
    small set of special tokens (`pad`/`eos`/`unk`) and optional `<extra_id_*>`
    sentinel tokens appended at the end of the vocabulary.
    """

    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Build the default sentinel tokens unless the caller supplied their own list.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f'<extra_id_{i}>' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("""extra_id""" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
                    """ provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
                    """ extra_ids tokens""")

        # NOTE(review): lstrip/rstrip flags were obfuscated in the original; False/False
        # matches the upstream implementation — confirm.
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            # Sentinels occupy the last `n` ids of the vocabulary.
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask where 1 marks special tokens (the appended eos positions)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        """Append eos unless the sequence already ends with it (warning in that case)."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
                """ eos tokens being added.""")
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """ByT5 does not use token type ids: return all zeros of the full length."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Format as `X </s>` or `A </s> B </s>`."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        """One token per UTF-8 byte, each represented as the chr() of its byte value."""
        tokens = [chr(i) for i in text.encode("""utf-8""")]
        return tokens

    def _convert_token_to_id(self, token):
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            # Multi-char strings that are not known special tokens map to unk.
            token_id = self.unk_token_id
        else:
            # Byte tokens are shifted past the special-token ids.
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Concatenate byte tokens and decode as UTF-8 (special tokens pass through as text)."""
        bstring = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("""utf-8""")
            elif token in self.added_tokens_decoder:
                # Bug fix: the original indexed `special_tokens_decoder` here, which
                # would raise KeyError for tokens only present in `added_tokens_decoder`.
                tok_string = self.added_tokens_decoder[token].encode("""utf-8""")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("""utf-8""")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("""utf-8""")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("""utf-8""", errors="""ignore""")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        # Byte-level vocabulary: there is no vocabulary file to save.
        return ()
| 457 |
import math
def proth(number: int) -> int:
    """
    Return the `number`-th Proth number (numbers of the form k * 2^n + 1 with odd k < 2^n).

    Renamed from an obfuscated identifier: the `__main__` block calls `proth(number)`.

    >>> proth(1)
    3
    >>> proth(2)
    5
    >>> proth(6)
    25

    :raises TypeError: if `number` is not an int
    :raises ValueError: if `number` < 1
    """
    if not isinstance(number, int):
        error_message = f"Input value of [number={number}] must be an integer"
        raise TypeError(error_message)

    if number < 1:
        error_message = f"Input value of [number={number}] must be > 0"
        raise ValueError(error_message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of power-of-two "blocks" needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3  # how many new values each block contributes (doubles per block)

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first few Proth numbers; 0 has no Proth number, so show the error.
    for number in range(11):
        value = 0  # the original bound the result to a different name than it printed
        try:
            value = proth(number)
        except ValueError:
            print(F"""ValueError: there is no {number}th Proth number""")
            continue

        print(F"""The {number}th Proth number: {value}""")
| 652 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger. (The original bound both this and the archive map to `_a`,
# so the logger was immediately shadowed.)
logger = logging.get_logger(__name__)

# Mapping of pretrained checkpoint name -> config URL (currently empty).
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """
    Configuration for a MegatronBERT model: BERT-style hyper-parameters with
    Megatron-sized defaults (29056 vocab, 1024 hidden, 24 layers, 16 heads).
    The original obfuscated signature reused one parameter name for every
    argument, which is a syntax error; real names restored.
    """

    model_type = '''megatron-bert'''

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 689 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Fixture builder for DetaImageProcessor tests; the test class below
    instantiates this by name (`DetaImageProcessingTester(self)`)."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) DetaImageProcessor is expected to produce.

        Unbatched: scale the first image so its shorter side equals
        size["shortest_edge"]. Batched: per-image expectation, then the max of
        each dimension (padding target).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None
def setUp(self):
    """Create the shared fixture builder (referenced as `self.image_processor_tester` below)."""
    self.image_processor_tester = DetaImageProcessingTester(self)
@property
def image_processor_dict(self):
    """Kwargs dict used to construct the image processor under test."""
    return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
    """The processor exposes every configuration attribute."""
    image_processing = self.image_processing_class(**self.image_processor_dict)
    self.assertTrue(hasattr(image_processing, 'image_mean'))
    self.assertTrue(hasattr(image_processing, 'image_std'))
    self.assertTrue(hasattr(image_processing, 'do_normalize'))
    self.assertTrue(hasattr(image_processing, 'do_resize'))
    self.assertTrue(hasattr(image_processing, 'do_rescale'))
    self.assertTrue(hasattr(image_processing, 'do_pad'))
    self.assertTrue(hasattr(image_processing, 'size'))
def test_image_processor_from_dict_with_kwargs(self):
    """`from_dict` round-trips the size dict and the do_pad flag."""
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
    self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
    # NOTE(review): the expected value was obfuscated in the original; the tester
    # default is do_pad=True — confirm.
    self.assertEqual(image_processor.do_pad, True)
# NOTE(review): intentional no-op override; its original name was lost to
# obfuscation (every method in this class is named `lowerCAmelCase`, so
# earlier definitions are shadowed) — recover the real name from upstream.
def lowerCAmelCase ( self : Any):
    pass
def test_call_pil(self):
    """Processor output shape for PIL inputs, unbatched and batched."""
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
    for image in image_inputs:
        self.assertIsInstance(image, Image.Image)

    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
    expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
    self.assertEqual(
        encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

    # Test batched
    expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
    encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
    self.assertEqual(
        encoded_images.shape, (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ), )
def test_call_numpy(self):
    """Processor output shape for numpy-array inputs, unbatched and batched."""
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
    for image in image_inputs:
        self.assertIsInstance(image, np.ndarray)

    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
    expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
    self.assertEqual(
        encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )

    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
    expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
    self.assertEqual(
        encoded_images.shape, (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ), )
def lowerCAmelCase ( self : int):
# Initialize image_processing
__lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
# Test not batched input
__lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def lowerCAmelCase ( self : Optional[Any]):
# prepare image and target
__lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f:
__lowerCamelCase : List[str] = json.loads(f.read())
__lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
__lowerCamelCase : Optional[int] = DetaImageProcessor()
__lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
# verify pixel values
__lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
# verify area
__lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
# verify boxes
__lowerCamelCase : int = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# verify image_id
__lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
# verify is_crowd
__lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
# verify class_labels
__lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
# verify orig_size
__lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
# verify size
__lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
@slow
def lowerCAmelCase ( self : str):
# prepare image, target and masks_path
__lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
__lowerCamelCase : Tuple = json.loads(f.read())
__lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
__lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
__lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
# verify pixel values
__lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
# verify area
__lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
# verify boxes
__lowerCamelCase : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# verify image_id
__lowerCamelCase : int = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
# verify is_crowd
__lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
# verify class_labels
__lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
# verify masks
__lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
# verify orig_size
__lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
# verify size
__lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
| 652 | 0 |
from __future__ import annotations
from math import ceil, floor, sqrt
def _lowercase ( __SCREAMING_SNAKE_CASE = 200_0000 ) -> int:
UpperCamelCase__ : list[int] = [0]
UpperCamelCase__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
UpperCamelCase__ : int = 0
# the area corresponding to the grid that gives the product closest to target
UpperCamelCase__ : int = 0
# an estimate of b, using the quadratic formula
UpperCamelCase__ : float
# the largest integer less than b_estimate
UpperCamelCase__ : int
# the largest integer less than b_estimate
UpperCamelCase__ : int
# the triangle number corresponding to b_floor
UpperCamelCase__ : int
# the triangle number corresponding to b_ceil
UpperCamelCase__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
UpperCamelCase__ : List[str] = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
UpperCamelCase__ : Dict = floor(lowerCamelCase__ )
UpperCamelCase__ : Optional[int] = ceil(lowerCamelCase__ )
UpperCamelCase__ : List[str] = triangle_numbers[b_floor]
UpperCamelCase__ : Tuple = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
UpperCamelCase__ : Any = triangle_b_first_guess * triangle_a
UpperCamelCase__ : List[Any] = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
UpperCamelCase__ : int = triangle_b_second_guess * triangle_a
UpperCamelCase__ : Optional[Any] = idx_a * b_ceil
return area
if __name__ == "__main__":
    # Bug fix: the guard called `solution()`, which does not exist in this
    # module — the solver above is named `_lowercase`.
    print(f"{_lowercase() = }")
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# Make all RNG sources deterministic so the pixel/latent comparisons in the
# tests below are reproducible across runs.
enable_full_determinism()
class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for ``KandinskyVaaControlnetPipeline``.

    NOTE(review): the original class was mangled — undefined base class
    ``SCREAMING_SNAKE_CASE``, every method named ``lowerCAmelCase`` (so later
    defs shadowed earlier ones and the "properties" read elsewhere never
    existed), and locals assigned to ``__lowerCamelCase`` while later lines
    read the real names. Names are restored from the in-file reads
    (``self.time_input_dim`` etc.) and the upstream diffusers test module.
    """

    pipeline_class = KandinskyVaaControlnetPipeline
    params = ['image_embeds', 'negative_image_embeds', 'hint']
    batch_params = ['image_embeds', 'negative_image_embeds', 'hint']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        """A tiny UNet configured for the image+hint conditioning used by ControlNet."""
        torch.manual_seed(0)
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble the tiny UNet/scheduler/VQ decoder the pipeline needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule='linear',
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        # mps does not support device-bound generators
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_controlnet(self):
        """Two-step CPU run: output slice must match the recorded reference and
        dict/tuple return paths must agree."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the published Kandinsky 2.2 checkpoints.

    NOTE(review): the original class was also named ``A_`` and shadowed the
    fast-test class above; names/locals are restored from the upstream
    diffusers test module.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # HWC uint8 -> normalized NCHW float hint tensor
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='',
        ).to_tuple()
        # reseed so the decoder run is reproducible independently of the prior
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type='np',
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 652 | 0 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files',
    [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ],
)
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    """DatasetInfosDict must load dataset_size=42 from README.md YAML and/or
    the legacy dataset_infos.json, whichever combination is present.

    NOTE(review): the original signature had two parameters with the same
    mangled name (a SyntaxError) and locals that never bound the names read
    later; the parameter names are required by pytest's fixture/parametrize
    matching (``files``, ``tmp_path_factory``) and are restored here.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('---\ndataset_info:\n  dataset_size: 42\n---')
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md', 'w') as f:
            f.write('')
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json', 'w') as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info',
    [
        DatasetInfo(),
        DatasetInfo(
            description='foo',
            features=Features({'a': Value('int32')}),
            builder_name='builder',
            config_name='config',
            version='1.0.0',
            splits=[{'name': 'train'}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(dataset_info, tmp_path):
    """A DatasetInfo written to a directory must reload equal to the original
    and must produce a dataset_info.json file.

    NOTE(review): the original signature had duplicate mangled parameter names
    (a SyntaxError); names restored per pytest's fixture matching.
    """
    dataset_info_dir = str(tmp_path)
    dataset_info.write_to_directory(dataset_info_dir)
    reloaded = DatasetInfo.from_directory(dataset_info_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(dataset_info_dir, 'dataset_info.json'))
def test_dataset_info_to_yaml_dict():
    """The YAML dict view of a fully-populated DatasetInfo must contain exactly
    the keys in ``_INCLUDED_INFO_IN_YAML`` and round-trip through yaml.

    NOTE(review): the original assigned every local to the mangled ``_A`` and
    then read the real names (NameError); locals restored.
    """
    dataset_info = DatasetInfo(
        description='foo',
        citation='bar',
        homepage='https://foo.bar',
        license='CC0',
        features=Features({'a': Value('int32')}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name='builder',
        config_name='config',
        version='1.0.0',
        splits=[{'name': 'train', 'num_examples': 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    """A default-constructed DatasetInfo serializes to an empty YAML dict."""
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict',
    [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()}),
        DatasetInfosDict({'my_config_name': DatasetInfo()}),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo',
                    features=Features({'a': Value('int32')}),
                    builder_name='builder',
                    config_name='config',
                    version='1.0.0',
                    splits=[{'name': 'train'}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42),
                'v2': DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(dataset_infos_dict, tmp_path):
    """A DatasetInfosDict written to a directory must reload equal to the
    original once the per-config normalization the dump applies is mirrored.

    NOTE(review): the original signature had duplicate mangled parameter names
    (a SyntaxError) and locals that never bound the names read later; restored.
    """
    dataset_infos_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(dataset_infos_dir)
    reloaded = DatasetInfosDict.from_directory(dataset_infos_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(dataset_infos_dir, 'README.md'))
| 128 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds tiny XGLM configs and inputs for the TF model tests.

    NOTE(review): the original ``__init__`` repeated the mangled parameter
    name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) and assigned locals
    instead of ``self`` attributes; names are restored from the attribute
    reads in the methods below (``self.batch_size`` etc.) and the in-file
    caller ``TFXGLMModelTester(self)``.
    """

    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function='gelu',
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        # token ids clipped to a tiny range so generation tests stay cheap
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test harness for the TF XGLM classes.

    NOTE(review): mangled in the original — the base mixins were the undefined
    name ``SCREAMING_SNAKE_CASE``, the class attributes all shared one name,
    and ``setUp`` assigned locals instead of ``self`` attributes. The mixin
    bases come from the file's imports; the three boolean flag names are
    reconstructed from the upstream test module — confirm before relying on
    them.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow generation tests against the published facebook/xglm-564M checkpoint.

    NOTE(review): methods and locals were mangled in the original (all methods
    shared one name, locals never bound the names read later); restored from
    the upstream test module.
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 652 | 0 |
"""simple docstring"""
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
__snake_case : Dict = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
__snake_case : List[str] = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
__snake_case : List[Any] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    """Fraction of positions where *preds* equals *labels* (NumPy arrays or
    anything whose ``==`` returns an object with a ``.mean()`` method).

    NOTE(review): the original bound all three helpers in this module to the
    same mangled name ``a_`` with duplicate parameter names (a SyntaxError);
    the call sites below use ``simple_accuracy``, so that name is restored.
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return plain accuracy together with the binary F1 score.

    NOTE(review): the original had duplicate mangled parameter names (a
    SyntaxError) and passed the same argument for both ``y_true`` and
    ``y_pred``; the call sites below use ``acc_and_fa``, so that name is
    restored along with the distinct arguments.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlation coefficients (used by the
    ``stsb`` regression subset).

    NOTE(review): the original had duplicate mangled parameter names (a
    SyntaxError); the call site below uses ``pearson_and_spearman``, so that
    name is restored.
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCamelCase ( datasets.Metric ):
    """GLUE evaluation metric: dispatches to the per-subset scoring function.

    NOTE(review): ``datasets.Metric`` subclasses must implement ``_info`` and
    ``_compute``; both methods here were mangled to the same name ``A__`` (the
    second with a duplicate parameter name — a SyntaxError) and
    ``matthews_corrcoef`` was called with the same argument twice. The
    required names and distinct arguments are restored.
    """

    def _info(self):
        """Declare input features; ``stsb`` is a regression subset, so its
        predictions/references are floats, every other subset uses int labels."""
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        """Score *predictions* against *references* with the subset's metric."""
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 571 |
# Kandinsky pipeline public exports.
#
# If either `transformers` or `torch` is missing, fall back to the dummy
# placeholder objects so that importing these names still succeeds and a
# helpful error is raised only when the pipelines are actually used.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummies only cover the two names that form the minimal public API.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Real implementations (require both torch and transformers).
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 652 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCAmelCase : List[str] =logging.get_logger(__name__)
__lowerCAmelCase : List[str] ={
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class UpperCAmelCase(PretrainedConfig):
    """Configuration for an X-MOD model (XLM-R with per-language adapters).

    NOTE(review): base restored to the imported `PretrainedConfig` (the
    obfuscated base name `UpperCamelCase__` was undefined) and parameter
    names restored from their defaults/assignment order — confirm against
    upstream `XmodConfig`.
    """

    # Required by PretrainedConfig for config-class dispatch.
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter-specific (X-MOD) options.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        # Copy to a list so callers can pass any iterable of language codes.
        self.languages = list(languages)
        self.default_language = default_language
# NOTE(review): this class shadows the config class above (both were named
# `UpperCAmelCase` by the obfuscation); the original was presumably
# `XmodOnnxConfig` — confirm before renaming.
class UpperCAmelCase(OnnxConfig):
    """ONNX export configuration: declares the dynamic axes of the inputs."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 440 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Zero-padded convolution followed by batch norm and an activation."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # ACTaFN is this file's activation registry import; identity when no activation requested.
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: a single strided convolution applied to the pixel values."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        # Validate the channel count eagerly; inside a traced graph the
        # symbolic dimension cannot be compared.
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch norm projecting the residual when the shape changes."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excite: rescales channels with a learned global gate."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # 1x1 conv bottleneck (relu) followed by a sigmoid gate back to in_channels.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X layer: ResNeXt-style bottleneck (1x1 -> grouped 3x3 -> 1x1)."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            # Final 1x1 conv has no activation; it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y layer: an X layer with an extra squeeze-and-excite block."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            # SE block with a 4x channel reduction relative to the input.
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` identical layers; the first one downsamples."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects per-stage hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name='stages.0',
            ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                # Record the input of each stage (and the final output below).
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + global pooler; shared by all RegNet heads."""

    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """Base class wiring the config/prefix and the serving input signature."""

    config_class = RegNetConfig
    base_model_prefix = 'regnet'
    main_input_name = 'pixel_values'

    @property
    def input_signature(self):
        # NOTE(review): property name restored as `input_signature` from the
        # TensorSpec shape it returns — confirm against upstream TFRegNet.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
a =r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''',
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Headless RegNet: returns pooled and last hidden states."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone with a linear classification head on the pooled output."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 652 | 0 |
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    """Unit tests for the backbone out_features/out_indices utilities.

    NOTE(review): the obfuscated original gave all three test methods the
    same name, so two were shadowed and never ran; distinct `test_*` names
    restored so unittest discovers each of them.
    """

    def test_get_aligned_output_features_output_indices(self):
        stage_names = ['a', 'b', 'c']

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)
        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])
        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])
        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])
        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])
        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])
        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])
        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])
        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ['a', 'b', 'c']
        backbone._out_features = ['a', 'c']
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ['a', 'b']
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
| 651 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
# `_unmatched` marks parameters not yet covered by any partition rule;
# `set_partitions` below asserts none remain after rule application.
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks) -> bool:
    """Return True if the regexes in `qs` match a contiguous window of `ks`.

    Args:
        qs: sequence of regex strings (each must match a whole key segment).
        ks: sequence of key-path segments to search.
    """
    # Append `$` so each pattern matches an entire segment, not a prefix.
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    """Build a `replace(key, val)` function applying the first matching rule.

    `rules` is a list of (pattern-tuple, replacement) pairs; when no pattern
    matches the key, the original value is returned unchanged.
    """
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Partition rules mapping transformer parameter paths to PartitionSpecs.

    `None` in a spec position means that axis is replicated; a rule value of
    `None` means the whole parameter is replicated.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every parameter in a nested param dict.

    Args:
        in_dict: nested dict of parameters (a flax param tree).

    Returns:
        A frozen dict with the same structure where each leaf is the
        PartitionSpec (or None) chosen by the first matching rule.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every flattened key with the sentinel, then apply the rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    # Every parameter must have been covered by some rule (possibly -> None).
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 652 | 0 |
'''simple docstring'''
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
A = {
'''return_dict''': False,
'''output_hidden_states''': True,
'''output_attentions''': True,
'''torchscript''': True,
'''torch_dtype''': '''float16''',
'''use_bfloat16''': True,
'''tf_legacy_loss''': True,
'''pruned_heads''': {'''a''': 1},
'''tie_word_embeddings''': False,
'''is_decoder''': True,
'''cross_attention_hidden_size''': 1_28,
'''add_cross_attention''': True,
'''tie_encoder_decoder''': True,
'''max_length''': 50,
'''min_length''': 3,
'''do_sample''': True,
'''early_stopping''': True,
'''num_beams''': 3,
'''num_beam_groups''': 3,
'''diversity_penalty''': 0.5,
'''temperature''': 2.0,
'''top_k''': 10,
'''top_p''': 0.7,
'''typical_p''': 0.2,
'''repetition_penalty''': 0.8,
'''length_penalty''': 0.8,
'''no_repeat_ngram_size''': 5,
'''encoder_no_repeat_ngram_size''': 5,
'''bad_words_ids''': [1, 2, 3],
'''num_return_sequences''': 3,
'''chunk_size_feed_forward''': 5,
'''output_scores''': True,
'''return_dict_in_generate''': True,
'''forced_bos_token_id''': 2,
'''forced_eos_token_id''': 3,
'''remove_invalid_values''': True,
'''architectures''': ['''BertModel'''],
'''finetuning_task''': '''translation''',
'''id2label''': {0: '''label'''},
'''label2id''': {'''label''': '''0'''},
'''tokenizer_class''': '''BertTokenizerFast''',
'''prefix''': '''prefix''',
'''bos_token_id''': 6,
'''pad_token_id''': 7,
'''eos_token_id''': 8,
'''sep_token_id''': 9,
'''decoder_start_token_id''': 10,
'''exponential_decay_length_penalty''': (5, 1.01),
'''suppress_tokens''': [0, 1],
'''begin_suppress_tokens''': 2,
'''task_specific_params''': {'''translation''': '''some_params'''},
'''problem_type''': '''regression''',
}
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    """Staging tests: push configurations to the Hugging Face Hub and read them back.

    NOTE(review): the class/method names and local variables were restored from
    garbled placeholders (every method was named identically and locals were
    assigned to a throwaway name while read under their real names, so nothing
    could run or be discovered by unittest).
    """

    @classmethod
    def setUpClass(cls):
        # Authenticate once for the whole class; tests reuse cls._token.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup: a repo may not exist if its test failed early.
        try:
            delete_repo(token=cls._token, repo_id="test-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-config-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-config")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("test-config", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, repo_id="test-config", push_to_hub=True, use_auth_token=self._token)

        new_config = BertConfig.from_pretrained(f"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        config.push_to_hub("valid_org/test-config-org", use_auth_token=self._token)

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = BertConfig.from_pretrained("valid_org/test-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42)

        config.push_to_hub("test-dynamic-config", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map, {"AutoConfig": "custom_configuration.CustomConfig"})

        new_config = AutoConfig.from_pretrained(f"{USER}/test-dynamic-config", trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__, "CustomConfig")
        self.assertEqual(new_config.attribute, 42)
class ConfigTestUtils(unittest.TestCase):
    """Offline/utility tests for `PretrainedConfig` behavior.

    NOTE(review): restored from garbled code in which every local was bound to a
    throwaway name (`_lowercase`) while being read under its real name, and all
    methods shared one non-`test_` name — nothing could run or be discovered.
    """

    def test_config_from_string(self):
        c = GPTaConfig()

        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")

    def test_config_common_kwargs_is_complete(self):
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys, ["is_encoder_decoder", "_name_or_path", "_commit_hash", "transformers_version"]
        )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config, key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                "The following keys are set with the default values in"
                " `test_configuration_common.config_common_kwargs` pick another value for them:"
                f" {', '.join(keys_with_defaults)}."
            )

    def test_from_pretrained_subfolder(self):
        # NOTE(review): OSError is the exception `from_pretrained` raises on a missing
        # config — confirm against the original test.
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder")

        config = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert-subfolder", subfolder="bert")

        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json"
        )

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained("bert-base-cased")
        configuration.configuration_files = ["config.4.0.0.json"]

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict(), open(os.path.join(tmp_dir, "config.4.0.0.json"), "w"))

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 2)

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ["config.42.0.0.json"]
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir, "config.4.0.0.json"), os.path.join(tmp_dir, "config.42.0.0.json"))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size, 768)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = "hf-internal-testing/test-two-configs"

        import transformers as new_transformers

        # NOTE(review): monkey-patched module version attribute restored by inference —
        # confirm the exact attribute against the original test.
        new_transformers.configuration_utils.__version__ = "v4.0.0"
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo, return_unused_kwargs=True
        )
        self.assertEqual(new_configuration.hidden_size, 2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs, {})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = "v3.0.0"
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size, 768)
import math


def prime_sieve(n: int) -> list:
    """Return a list of all primes strictly below ``n`` (requires ``n`` > 2).

    Uses an odd-only sieve of Eratosthenes: even indices other than 2 are never
    inspected, so only odd multiples need to be cleared.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            # Fix: the garbled version appended the sieve bound instead of the prime.
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    """Sum all numbers <= ``limit`` divisible by exactly one of lps or ups.

    For each number m, lps is the largest prime whose square is <= m and ups the
    smallest prime whose square is > m; the outer loop walks consecutive prime
    pairs (last_prime, next_prime) and handles the interval
    (last_prime**2, next_prime**2) in one pass, subtracting twice the numbers
    divisible by both primes.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 652 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Lazy-import structure: maps each submodule to the public names it defines.
# Fix: the backend-specific lists below were assigned to a throwaway name and
# never merged into this dict, and the final _LazyModule call referenced the
# then-undefined `_import_structure`.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mangled module/class names (`dataavec` / `DataaVec`) restored to match the
    # string lists above.
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """
    Output class for the scheduler's `step_pred` function.

    Attributes:
        prev_sample (`torch.FloatTensor`): computed sample of the previous timestep.
        prev_sample_mean (`torch.FloatTensor`): mean of `prev_sample` before the noise term is added.

    Fix: this class was named `A_` (immediately shadowed by the scheduler class
    below) while `step_pred` returns `SdeVeOutput(prev_sample=..., prev_sample_mean=...)`,
    and the base class placeholder is restored to the imported `BaseOutput`.
    The field names are grounded by the keyword arguments used in `step_pred`.
    """

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Variance-exploding (VE) SDE scheduler with predictor (`step_pred`) and
    corrector (`step_correct`) steps.

    NOTE(review): all methods shared the single garbled name `lowerCAmelCase`
    (each definition shadowed the previous), while `self.set_sigmas`,
    `self.set_timesteps` and `self.get_adjacent_sigma` are called by name inside
    the class — those names are restored from the call sites. The remaining
    public names (`scale_model_input`, `step_pred`, `step_correct`, `add_noise`,
    class name) follow the standard diffusers scheduler API — confirm against
    the original module.
    """

    # Solver order (consumed by pipelines); restored from the garbled `= 1` field.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_0_0_0,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1E-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # The SDE-VE scheduler does not rescale model inputs.
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Set the continuous timesteps, linearly spaced from 1 down to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise scales for the SDE, deriving timesteps first if needed."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # Sigma of the previous discrete step; zero at the very first step.
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate the sample with the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: annealed Langevin correction of the current sample."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ):
        """Add forward-process noise at the given timesteps (used for training)."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 652 | 0 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_UpperCamelCase = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Save *model* into *dirpath* via ``save_pretrained``.

    If the directory already exists, any stale ``config.json`` /
    ``pytorch_model.bin`` are removed first; otherwise the directory is created.
    (Name restored from the garbled `_a`; grounded by the `save_model(...)` call
    site later in this file.)
    """
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a distribution *p* along its last axis.

    If ``unlogit`` is True, *p* is first squared (``p**2``) before the entropy
    is taken. Zero probabilities contribute 0 (the ``0 * log(0)`` NaN is masked).
    Name restored from the garbled `_a`; grounded by the `entropy(...)` call in
    `compute_heads_importance`.
    """
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    # Mask 0 * log(0) = NaN entries so zero-probability events contribute nothing.
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_ad_tensor(tensor):
    """Log a 2D tensor: a header row of 1-based column indices, then one line per layer.

    Integer (``torch.long``) tensors are formatted as ``%d``, everything else with
    five decimals. Name restored from the garbled `_a`; grounded by the
    `print_ad_tensor(...)` call sites later in this file.
    """
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Run the model over *eval_dataloader* and accumulate per-head statistics.

    Returns ``(attn_entropy, head_importance, total_loss)``. Head importance is the
    accumulated absolute gradient of the head mask; entropy is computed from the
    attention maps. Locals and the function name were restored from garbled
    placeholders (name grounded by the call sites in this file).
    """
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1E-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_ad_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_ad_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE(review): restored indexed assignment ranking heads by descending
    # importance — the garbled version dropped the left-hand index.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_ad_tensor(head_ranks)

    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least-important heads until the LM score drops below the threshold.

    Returns the last head mask whose score was still above
    ``original_score * args.masking_threshold``. Locals restored from garbled
    placeholders; function name follows the bertology example — confirm against
    the original script.
    """
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_ad_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually prune the heads zeroed in *head_mask* and compare score/speed before vs after.

    Locals restored from garbled placeholders; function name follows the bertology
    example — confirm against the original script.
    """
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        # A single pruned head comes back as a bare int from .tolist(); normalize to a list.
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )

    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def _a ( ):
"""simple docstring"""
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , required=lowerCamelCase__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=lowerCamelCase__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=lowerCamelCase__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=lowerCamelCase__ , type=lowerCamelCase__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=lowerCamelCase__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""" )
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""" )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don\'t normalize importance score by layers""" )
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don\'t normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""" )
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=lowerCamelCase__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=lowerCamelCase__ , help="""Amount to heads to masking at each masking step.""" )
parser.add_argument("""--metric_name""" , default="""acc""" , type=lowerCamelCase__ , help="""Metric to use for head masking.""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=lowerCamelCase__ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=lowerCamelCase__ , help="""Batch size.""" )
parser.add_argument("""--seed""" , type=lowerCamelCase__ , default=42 )
parser.add_argument("""--local_rank""" , type=lowerCamelCase__ , default=-1 , help="""local_rank for distributed training on gpus""" )
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""" )
parser.add_argument("""--server_ip""" , type=lowerCamelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
parser.add_argument("""--server_port""" , type=lowerCamelCase__ , default="""""" , help="""Can be used for distant debugging.""" )
UpperCAmelCase = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
UpperCAmelCase = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""" )
UpperCAmelCase = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
UpperCAmelCase = torch.device("""cuda""" , args.local_rank )
UpperCAmelCase = 1
torch.distributed.init_process_group(backend="""nccl""" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
UpperCAmelCase = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
UpperCAmelCase = nn.parallel.DistributedDataParallel(
lowerCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=lowerCamelCase__ )
elif args.n_gpu > 1:
UpperCAmelCase = nn.DataParallel(lowerCamelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=lowerCamelCase__ )
torch.save(lowerCamelCase__ , os.path.join(args.output_dir , """run_args.bin""" ) )
logger.info("""Training/evaluation parameters %s""" , lowerCamelCase__ )
# Prepare dataset
UpperCAmelCase = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
UpperCAmelCase = (torch.from_numpy(lowerCamelCase__ ),)
UpperCAmelCase = TensorDataset(*lowerCamelCase__ )
UpperCAmelCase = RandomSampler(lowerCamelCase__ )
UpperCAmelCase = DataLoader(lowerCamelCase__ , sampler=lowerCamelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
UpperCAmelCase = mask_heads(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
prune_heads(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
if __name__ == "__main__":
main()
| 341 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Configure root logging once at import time so the PCA/LDA steps below report progress.
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D row array into a column vector of shape (n, 1).

    Args:
        input_array: Any NumPy array; its elements are kept in order.

    Returns:
        The same data viewed as a single-column 2-D array.
    """
    # The original definition's parameter name did not match the body (NameError);
    # restored to the name the body uses and callers (column_reshape) expect.
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class scatter (covariance) matrix.

    Args:
        features: Feature matrix of shape (n_features, n_samples).
        labels: Integer class label per sample, values in ``range(classes)``.
        classes: Number of distinct classes.

    Returns:
        The summed per-class covariance, normalized by the total sample count.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # covariance_sum already holds a matrix from the first iteration
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First iteration: initialize the accumulator (was np.nan)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class scatter (covariance) matrix.

    Args:
        features: Feature matrix of shape (n_features, n_samples).
        labels: Integer class label per sample, values in ``range(classes)``.
        classes: Number of distinct classes.

    Returns:
        The weighted scatter of class means around the global mean,
        normalized by the total sample count.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i (weight)
        data_mean = data.mean(1)
        if i > 0:
            # Accumulate the weighted outer product of (class mean - global mean)
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # First iteration: initialize the accumulator (was np.nan)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project *features* onto its top *dimensions* principal components.

    Args:
        features: Feature matrix of shape (n_features, n_samples).
        dimensions: Number of principal components to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: If the dataset is empty (all-zero ``features``).
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first
        # `dimensions` (eigh returns eigenvalues ascending, so this keeps the largest).
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project *features* onto *dimensions* linear discriminants.

    Args:
        features: Feature matrix of shape (n_features, n_samples).
        labels: Integer class label per sample.
        classes: Number of distinct classes; must exceed ``dimensions``.
        dimensions: Number of discriminant directions to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: If ``classes <= dimensions`` or the dataset is empty.
    """
    assert classes > dimensions
    # Check if features have been already loaded.
    # Fixed: `features.any` was a bound method (always truthy); it must be called.
    if features.any():
        # Generalized eigenproblem of between-class vs within-class scatter
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the kept directions through an SVD
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            # Reached only if the call above did NOT raise as required
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes'
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA sanity check via the expected-output comparison inside pytest.raises."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 652 | 0 |
'''simple docstring'''
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    """Download the BLIP demo image and return it as a normalized tensor.

    Args:
        image_size: Side length the image is resized to (square).
        device: Torch device string/object the tensor is moved to.

    Returns:
        A (1, 3, image_size, image_size) float tensor on ``device``.
    """
    img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
    # stream=True so PIL can read directly from the response's raw file object
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('''RGB''')
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.4814_5466, 0.457_8275, 0.4082_1073), (0.2686_2954, 0.2613_0258, 0.2757_7711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    """Map an original BLIP state-dict key to the HF Transformers naming scheme.

    Substitutions are applied in order, so earlier renames (e.g. ``attn`` ->
    ``self_attn``) feed into later checks (``self_attn.proj``).

    Args:
        key: Original checkpoint parameter name.

    Returns:
        The renamed key (unchanged if no pattern matches).
    """
    if "visual_encoder" in key:
        key = re.sub('''visual_encoder*''', '''vision_model.encoder''', key)
    if "blocks" in key:
        key = re.sub(R'''blocks''', '''layers''', key)
    if "attn" in key:
        key = re.sub(R'''attn''', '''self_attn''', key)
    if "norm1" in key:
        key = re.sub(R'''norm1''', '''layer_norm1''', key)
    if "norm2" in key:
        key = re.sub(R'''norm2''', '''layer_norm2''', key)
    if "encoder.norm" in key:
        key = re.sub(R'''encoder.norm''', '''post_layernorm''', key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(R'''encoder.patch_embed.proj''', '''embeddings.patch_embedding''', key)
    if "encoder.pos_embed" in key:
        key = re.sub(R'''encoder.pos_embed''', '''embeddings.position_embedding''', key)
    if "encoder.cls_token" in key:
        key = re.sub(R'''encoder.cls_token''', '''embeddings.class_embedding''', key)
    if "self_attn" in key:
        key = re.sub(R'''self_attn.proj''', '''self_attn.projection''', key)
    return key
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    """Convert the original BLIP checkpoints (captioning, VQA, ITM) to HF format.

    Downloads the three reference checkpoints, renames their state-dict keys via
    :func:`rename_key`, loads them into the corresponding HF model classes,
    sanity-checks outputs on the demo image, and optionally saves each model.

    Args:
        pytorch_dump_folder_path: Output folder (``_vqa``/``_itm`` suffixed for
            the other two heads); ``None`` skips saving.
        config_path: Optional path to an hf ``config.json``; a default
            ``BlipConfig`` is used otherwise.
    """
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()

    # --- captioning model ---
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit='''base''')
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device='''cpu''')
    tokenizer = BertTokenizer.from_pretrained('''bert-base-uncased''')
    input_ids = tokenizer(['''a picture of''']).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # --- VQA model ---
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit='''base''')
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ['How many dogs are in this image?']
    question_input_ids = tokenizer(question, return_tensors='''pt''').input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''')

    # --- image-text matching (retrieval) model ---
    model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit='''base''')
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ['A picture of a woman with a dog sitting in a beach']
    question_input_ids = tokenizer(
        question, return_tensors='''pt''', padding='''max_length''', truncation=True, max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110_6874_9427_7954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.4_5698_8453_8650_5127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()
    # Fixed: the old call passed a nonexistent `args.checkpoint_path` (never added
    # to the parser) and three arguments to a two-parameter function.
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 638 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Root logger used by the test class below (which references the name `logger`).
logger = logging.getLogger()
# Backward-compatible alias for the previous garbled name.
a = logger
@unittest.skip('''Temporarily disable the doc tests.''')
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    """Runs the doctests embedded in library modules and documentation files.

    Every method was previously named identically (each definition overrode
    the previous one) and `self.analyze_directory` was never defined; real
    method names are restored here.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run doctests for files under `directory` matching the filters.

        Args:
            directory: Directory whose (non-recursive) files are scanned.
            identifier: Keep only files containing this substring.
            n_identifier: Drop files containing this substring (or any of a list).
            ignore_files: Extra file names to skip (``__init__.py`` always is).
            only_modules: If True, run ``DocTestSuite`` on the transformers
                attribute named after the file; otherwise run ``doctest.testfile``.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        # NOTE(review): appending mutates a caller-provided list — confirm intended.
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        module_directory = Path('src/transformers')
        file_identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(module_directory, identifier=file_identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        module_directory = Path('src/transformers')
        file_identifier = 'tokenization'
        self.analyze_directory(module_directory, identifier=file_identifier)

    def test_configuration_files(self):
        module_directory = Path('src/transformers')
        file_identifier = 'configuration'
        self.analyze_directory(module_directory, identifier=file_identifier)

    def test_files(self):
        module_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(module_directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 652 | 0 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase(unittest.TestCase):
    """Pipeline tests for audio classification.

    The methods were all garbled to the same name (`__A`) so only the last
    survived and `self.run_torchaudio` did not exist; real names are restored.
    """

    model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        """Build an AudioClassificationPipeline and two raw-waveform examples."""
        audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor)
        # test with a raw waveform
        audio = np.zeros((3_40_00,))
        audioa = np.zeros((1_40_00,))
        return audio_classifier, [audioa, audio]

    def run_pipeline_test(self, audio_classifier, examples):
        """Smoke-test the pipeline's output shape for default and top_k=1 calls."""
        audioa, audio = examples
        output = audio_classifier(audioa)
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            output,
            [
                {"""score""": ANY(float), """label""": ANY(str)},
                {"""score""": ANY(float), """label""": ANY(str)},
            ],
        )
        output = audio_classifier(audioa, top_k=1)
        self.assertEqual(
            output,
            [
                {"""score""": ANY(float), """label""": ANY(str)},
            ],
        )
        self.run_torchaudio(audio_classifier)

    @require_torchaudio
    def run_torchaudio(self, audio_classifier):
        """Exercise the pipeline on a real waveform loaded via `datasets`."""
        import datasets

        # test with a local file
        dataset = datasets.load_dataset("""hf-internal-testing/librispeech_asr_dummy""", """clean""", split="""validation""")
        audio = dataset[0]['audio']['array']
        output = audio_classifier(audio)
        self.assertEqual(
            output,
            [
                {"""score""": ANY(float), """label""": ANY(str)},
                {"""score""": ANY(float), """label""": ANY(str)},
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        model = 'anton-l/wav2vec2-random-tiny-classifier'
        audio_classifier = pipeline("""audio-classification""", model=model)
        audio = np.ones((80_00,))
        output = audio_classifier(audio, top_k=4)
        EXPECTED_OUTPUT = [
            {'score': 0.0842, 'label': 'no'},
            {'score': 0.0838, 'label': 'up'},
            {'score': 0.0837, 'label': 'go'},
            {'score': 0.0834, 'label': 'right'},
        ]
        EXPECTED_OUTPUT_PT_2 = [
            {'score': 0.0845, 'label': 'stop'},
            {'score': 0.0844, 'label': 'on'},
            {'score': 0.0841, 'label': 'right'},
            {'score': 0.0834, 'label': 'left'},
        ]
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])
        audio_dict = {'array': np.ones((80_00,)), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
        output = audio_classifier(audio_dict, top_k=4)
        self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2])

    @require_torch
    @slow
    def test_large_model_pt(self):
        import datasets

        model = 'superb/wav2vec2-base-superb-ks'
        audio_classifier = pipeline("""audio-classification""", model=model)
        dataset = datasets.load_dataset("""anton-l/superb_dummy""", """ks""", split="""test""")
        audio = np.array(dataset[3]["""speech"""], dtype=np.floataa)
        output = audio_classifier(audio, top_k=4)
        self.assertEqual(
            nested_simplify(output, decimals=3),
            [
                {"""score""": 0.981, """label""": """go"""},
                {"""score""": 0.007, """label""": """up"""},
                {"""score""": 0.006, """label""": """_unknown_"""},
                {"""score""": 0.001, """label""": """down"""},
            ],
        )

    @require_tf
    @unittest.skip("""Audio classification is not implemented for TF""")
    def test_small_model_tf(self):
        pass
| 457 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# All of these were bound to the single name `a` (each assignment clobbered the
# previous one) while the tokenizer class below references the real names.
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1024}
class A_(PreTrainedTokenizer):
    """Syllable-level BARTpho tokenizer backed by SentencePiece with a reduced
    monolingual vocabulary loaded from ``dict.txt``.

    The base class name and every method name were garbled (all methods shared
    one name); the names the tokenizer framework requires are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; carry its serialized proto.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Add BOS/EOS (cls/sep) around one or two sequences: <s> A </s> [</s> B </s>]."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """BARTpho (like BART) does not use token types: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token to its id in the reduced vocab, falling back to <unk>."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join sentencepiece tokens back into a plain string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write the SentencePiece model and the monolingual dict to `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk: dump the in-memory serialized model instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 652 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
# Module-level logger. NOTE(review): the `List[str]` annotation is wrong — this
# is a Logger object returned by the transformers logging helper; confirm and fix.
_a : List[str] = logging.get_logger(__name__)
class _UpperCAmelCase(SegformerImageProcessor):
    """Deprecated alias of `SegformerImageProcessor`.

    Fixed: the base class was a garbled undefined name (`lowerCAmelCase_`) and
    the warning category argument was garbled; a deprecation warning must be a
    `FutureWarning`.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use SegformerImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 689 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds tiny DeiT configs/inputs and runs shape checks for the TF DeiT models.

    Renamed from the obfuscated ``A_``: the test class below already instantiates
    ``TFDeiTModelTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        # Parameter names restored from the attribute assignments below: the obfuscated
        # signature repeated a single placeholder name, which is a SyntaxError in Python.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return a small config plus random pixel values (and labels when enabled)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF DeiT models.

    NOTE(review): the obfuscated source listed the same placeholder base class twice
    (a duplicate-base ``TypeError``) and gave every attribute/method the same name so
    later definitions clobbered earlier ones. Conventional mixin/attribute/method
    names are restored here — confirm the mixins are imported at the top of the file.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    # Common-suite features DeiT does not support.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Drop `labels` for model classes (e.g. the with-teacher variant) whose call()
        # signature does not accept them.
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the slow integration test."""
    # Renamed from the obfuscated `SCREAMING_SNAKE_CASE__`: the integration test
    # below calls `prepare_img()`, which was otherwise undefined.
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase ( self : List[Any]):
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')
__lowerCamelCase : int = self.default_image_processor
__lowerCamelCase : Tuple = prepare_img()
__lowerCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf')
# forward pass
__lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__)
# verify the logits
__lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861])
self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 652 | 0 |
"""Lazy import structure for the Perceiver model (configuration, tokenizer, processors, modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# The obfuscated file assigned every structure to the same throwaway variable and
# then passed an undefined `_import_structure` to _LazyModule (a NameError).
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

# Vision-dependent exports are only registered when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

# Torch-dependent exports.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the lazy structure with real imports so static type checkers see the names.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_(PipelineTool):
    """Zero-shot text classifier built on an NLI checkpoint.

    ``encode`` pairs the input text with one "This example is {label}" hypothesis per
    candidate label; ``decode`` picks the label whose entailment logit is highest.

    NOTE(review): attribute/method names restored to the ``PipelineTool`` protocol
    (``setup``/``encode``/``decode``); the obfuscated source named all three ``_A``,
    so the later definitions clobbered the earlier ones.
    """

    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']

    def setup(self):
        """Load the model/tokenizer, then locate the "entailment" label index in the config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        # Fixed `config.idalabel` (obfuscation artifact): PretrainedConfig exposes `id2label`.
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (premise, hypothesis) pairs — one per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f'''This example is {label}''' for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the candidate label with the highest entailment logit."""
        logits = outputs.logits
        # Use the entailment index discovered in setup() instead of a hard-coded
        # column 2, so checkpoints with a different label order still work.
        label_id = torch.argmax(logits[:, self.entailment_id]).item()
        return self._labels[label_id]
| 653 | 1 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 653 |
"""Lazy import structure for the ViT model (configuration, processors, PT/TF/Flax modeling)."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


# Maps submodule name -> public names it exports; consumed by _LazyModule below.
# The obfuscated file assigned every structure to the same throwaway variable and
# then passed an undefined `_import_structure` to _LazyModule (a NameError).
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Mirror the lazy structure with real imports so static type checkers see the names.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound (m/s) in a fluid via the Newton-Laplace equation.

    c = sqrt(K / rho), where K is the bulk modulus (Pa) and rho the fluid
    density (kg/m^3).

    Raises:
        ValueError: if either argument is not strictly positive.
    """
    # Distinct parameter names restored: the obfuscated signature repeated one
    # name twice (a SyntaxError) while the body already used these names.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds tiny LayoutLMv3 configs/inputs and runs shape checks for the PyTorch models.

    Renamed from the obfuscated ``A_``: the test class below already instantiates
    ``LayoutLMvaModelTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        # Parameter names restored from the attribute assignments below: the obfuscated
        # signature repeated a single placeholder name, which is a SyntaxError in Python.
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Random ids/bboxes/pixels (and labels when enabled) plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the PyTorch LayoutLMv3 models.

    NOTE(review): the obfuscated source listed the same placeholder base class twice
    (a duplicate-base ``TypeError``) and reused one name for every attribute/method.
    Conventional names are restored — confirm the mixins are imported at the top of
    the file.
    """

    # Common-suite features LayoutLMv3 does not support.
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip every pipeline test for this model.
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # Expand inputs for multiple-choice heads and synthesize the labels each
        # head type expects when `return_labels` is requested.
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below."""
    # Fix 1: the original assigned the image to ``snake_case_`` and returned the
    # undefined name ``image``.
    # Fix 2: the function was defined under an obfuscated name while its call site
    # uses ``prepare_img``; define under the real name and keep the old as an alias.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image


__UpperCAmelCase = prepare_img  # backward-compatible alias for the obfuscated name
@require_torch
class A_ (unittest.TestCase ):
    """Integration test: run microsoft/layoutlmv3-base on a tiny hand-built example
    and compare a slice of the final hidden states against reference values.

    NOTE(review): de-obfuscated — the original bound every value to ``snake_case_``
    and then read undefined names; locals and the two method names are restored from
    their use sites. ``torch_device`` is assumed to be imported from
    ``transformers.testing_utils`` higher up in this file — confirm.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor with OCR disabled (words/boxes are supplied manually)."""
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """Forward a 2-token example with bounding boxes and verify output logits."""
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
        input_ids = torch.tensor([[1, 2]] )
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device ) , bbox=bbox.to(torch_device ) , pixel_values=pixel_values.to(torch_device ) , )
        # verify the shape of the last hidden state (1 batch, 199 positions, 768 dims)
        expected_shape = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )
        # verify a 3x3 slice of activations against precomputed reference values
        expected_slice = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
    """Zero-shot text-classification tool: scores each candidate label as an NLI
    hypothesis ("This example is <label>") against the input text and returns the
    label with the highest entailment logit.

    NOTE(review): de-obfuscated reconstruction of transformers'
    ``TextClassificationTool`` — the chunk collapsed all class attributes onto
    ``a__``, all three methods onto ``_A``, duplicated parameter names (a
    SyntaxError) and read undefined locals. Names below are restored per the
    ``PipelineTool`` contract; ``config.idalabel`` is read as the standard
    ``config.id2label``.
    """

    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']

    def setup(self):
        """Run the base setup, then locate the model's entailment label id."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs — one hypothesis per candidate label."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'''This example is {label}''' for label in labels] , return_tensors="pt" , padding="max_length" , )

    def decode(self, outputs):
        """Pick the label whose hypothesis received the highest entailment logit."""
        logits = outputs.logits
        # Consistency fix: use the entailment id discovered in setup() (column 2 for
        # bart-large-mnli, so behavior is unchanged there) instead of hard-coding 2.
        label_id = torch.argmax(logits[:, self.entailment_id] ).item()
        return self._labels[label_id]
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
    """Return the input integer incremented by one (module-level so it pickles)."""
    # Fix: the body returned the undefined name ``i`` instead of the parameter.
    return __magic_name__ + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
    """Check that the joblib-spark backend is selected under ``parallel_backend`` and
    that an unsupported backend name raises inside ``map_nested``.

    NOTE(review): obfuscation damage — ``pytest.raises(__magic_name__)`` and the
    first two ``map_nested`` arguments reference an undefined name; upstream this
    was presumably ``pytest.raises(ValueError)`` over ``map_nested(add_one, lst,
    ...)``. Code kept byte-identical; confirm against datasets' test_parallel.py.
    """
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    snake_case_ : str = [1, 2, 3]
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
    """Under the spark backend, ``map_nested`` should map over lists, dicts, nested
    dicts and mixed containers while preserving structure (parametrized num_proc).

    NOTE(review): obfuscation damage — all ten input/expected bindings were collapsed
    onto ``snake_case_`` and the asserts read names never defined here
    (``expected_map_nested_sa`` and the ``__magic_name__`` arguments). Code kept
    byte-identical; restore names from datasets' test_parallel.py before running.
    """
    snake_case_ : Optional[Any] = [1, 2]
    snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
    snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
    snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
    snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
    snake_case_ : Tuple = [2, 3]
    snake_case_ : str = {"a": 2, "b": 3}
    snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
    snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
    snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
| 653 | 1 |
'''simple docstring'''
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
# Make the shared test_module fixtures importable (they live under <repo>/utils).
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
# Fixture paths used throughout the tests below.
# NOTE(review): the three constants were all obfuscated to ``__lowerCamelCase`` so
# only the last binding survives; upstream they have distinct names — confirm.
__lowerCamelCase : Optional[int] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__lowerCamelCase : Dict = get_tests_dir('''fixtures/vocab.json''')
__lowerCamelCase : Optional[int] = get_tests_dir('''fixtures''')
class A_ (unittest.TestCase ):
    """Tests for ``AutoProcessor.from_pretrained`` resolution: shortcut names, local
    folders, config fallbacks, dynamic (remote-code) processors and registration of
    custom processor classes.

    NOTE(review): this chunk is machine-obfuscated — every method is named ``_A``
    (so only the last definition survives on the class), assignment targets were
    rewritten to ``snake_case_`` while later lines read the original local names
    (``model_config``, ``processor``, ``config_dict``, ...), and many call
    arguments were replaced by the undefined name ``lowerCAmelCase__``. Code is
    left byte-identical; restore names from the upstream
    tests/models/auto/test_processor_auto.py before executing.
    """
    a__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    def _A ( self :Tuple ) -> List[str]:
        '''simple docstring'''
        snake_case_ : List[Any] = 0
    def _A ( self :List[Any] ) -> Dict:
        '''simple docstring'''
        snake_case_ : Optional[int] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :int ) -> Optional[Any]:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Optional[Any] = WavaVecaConfig()
            snake_case_ : Any = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
            # save in new folder
            model_config.save_pretrained(lowerCAmelCase__ )
            processor.save_pretrained(lowerCAmelCase__ )
            snake_case_ : Any = AutoProcessor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Dict ) -> int:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            copyfile(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) )
            copyfile(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "vocab.json" ) )
            snake_case_ : List[Any] = AutoProcessor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Any ) -> Optional[int]:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : int = WavaVecaFeatureExtractor()
            snake_case_ : Any = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
            snake_case_ : List[str] = WavaVecaProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
            # save in new folder
            processor.save_pretrained(lowerCAmelCase__ )
            # drop `processor_class` in tokenizer
            with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , "r" ) as f:
                snake_case_ : Tuple = json.load(lowerCAmelCase__ )
                config_dict.pop("processor_class" )
            with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , "w" ) as f:
                f.write(json.dumps(lowerCAmelCase__ ) )
            snake_case_ : Union[str, Any] = AutoProcessor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Any ) -> List[Any]:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Optional[int] = WavaVecaFeatureExtractor()
            snake_case_ : int = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
            snake_case_ : Tuple = WavaVecaProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
            # save in new folder
            processor.save_pretrained(lowerCAmelCase__ )
            # drop `processor_class` in feature extractor
            with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , "r" ) as f:
                snake_case_ : Union[str, Any] = json.load(lowerCAmelCase__ )
                config_dict.pop("processor_class" )
            with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , "w" ) as f:
                f.write(json.dumps(lowerCAmelCase__ ) )
            snake_case_ : List[str] = AutoProcessor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Tuple ) -> Optional[Any]:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Dict = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
            model_config.save_pretrained(lowerCAmelCase__ )
            # copy relevant files
            copyfile(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , "vocab.json" ) )
            # create emtpy sample processor
            with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , "w" ) as f:
                f.write("{}" )
            snake_case_ : Dict = AutoProcessor.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :List[str] ) -> Dict:
        '''simple docstring'''
        with self.assertRaises(lowerCAmelCase__ ):
            snake_case_ : Union[str, Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCAmelCase__ ):
            snake_case_ : Tuple = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCAmelCase__ )
        self.assertTrue(processor.special_attribute_present )
        self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
        snake_case_ : Dict = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        snake_case_ : Union[str, Any] = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present )
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizerFast" )
            # Test we can also load the slow version
            snake_case_ : Any = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCAmelCase__ , use_fast=lowerCAmelCase__ )
            snake_case_ : List[str] = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present )
            self.assertEqual(new_tokenizer.__class__.__name__ , "NewTokenizer" )
        else:
            self.assertEqual(tokenizer.__class__.__name__ , "NewTokenizer" )
    def _A ( self :Union[str, Any] ) -> Optional[int]:
        '''simple docstring'''
        try:
            AutoConfig.register("custom" , lowerCAmelCase__ )
            AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            AutoTokenizer.register(lowerCAmelCase__ , slow_tokenizer_class=lowerCAmelCase__ )
            AutoProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCAmelCase__ ):
                AutoProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            snake_case_ : Dict = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                snake_case_ : Optional[int] = os.path.join(lowerCAmelCase__ , "vocab.txt" )
                with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
                snake_case_ : Tuple = CustomTokenizer(lowerCAmelCase__ )
            snake_case_ : Tuple = CustomProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(lowerCAmelCase__ )
                snake_case_ : Dict = AutoProcessor.from_pretrained(lowerCAmelCase__ )
                self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        finally:
            # Always unregister the custom classes so later tests see a clean registry.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _A ( self :List[str] ) -> Any:
        '''simple docstring'''
        class A_ (a_ ):
            """simple docstring"""
            a__ = False
        class A_ (a_ ):
            """simple docstring"""
            a__ = False
        class A_ (a_ ):
            """simple docstring"""
            a__ = '''AutoFeatureExtractor'''
            a__ = '''AutoTokenizer'''
            a__ = False
        try:
            AutoConfig.register("custom" , lowerCAmelCase__ )
            AutoFeatureExtractor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            AutoTokenizer.register(lowerCAmelCase__ , slow_tokenizer_class=lowerCAmelCase__ )
            AutoProcessor.register(lowerCAmelCase__ , lowerCAmelCase__ )
            # If remote code is not set, the default is to use local classes.
            snake_case_ : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            snake_case_ : str = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            snake_case_ : int = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor" , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(processor.__class__.__name__ , "NewProcessor" )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            # Always unregister the custom classes so later tests see a clean registry.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _A ( self :List[str] ) -> Any:
        '''simple docstring'''
        snake_case_ : Tuple = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
        self.assertEqual(processor.__class__.__name__ , "BertTokenizerFast" )
    def _A ( self :Optional[Any] ) -> int:
        '''simple docstring'''
        snake_case_ : List[str] = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
        self.assertEqual(processor.__class__.__name__ , "ConvNextImageProcessor" )
@is_staging_test
class A_ (unittest.TestCase ):
    """Staging-endpoint tests for pushing processors to the Hub (user namespace,
    organization namespace, and dynamic custom-code processors).

    NOTE(review): machine-obfuscated like the class above — every method is named
    ``_A``, ``snake_case_`` assignment targets are decoupled from the names read
    later (``processor``, ``new_processor``, ``repo``, ``tokenizer_config``) and
    several arguments are the undefined ``lowerCAmelCase__``. Code is left
    byte-identical; restore names from the upstream test file before executing.
    """
    a__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
    @classmethod
    def _A ( cls :Any ) -> Any:
        '''simple docstring'''
        snake_case_ : Tuple = TOKEN
        HfFolder.save_token(lowerCAmelCase__ )
    @classmethod
    def _A ( cls :int ) -> int:
        '''simple docstring'''
        # Best-effort cleanup of repos created by earlier runs.
        try:
            delete_repo(token=cls._token , repo_id="test-processor" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
        except HTTPError:
            pass
    def _A ( self :str ) -> Tuple:
        '''simple docstring'''
        snake_case_ : int = WavaVecaProcessor.from_pretrained(lowerCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(lowerCAmelCase__ , "test-processor" ) , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token )
        snake_case_ : List[Any] = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
        for k, v in processor.feature_extractor.__dict__.items():
            self.assertEqual(lowerCAmelCase__ , getattr(new_processor.feature_extractor , lowerCAmelCase__ ) )
        self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def _A ( self :Tuple ) -> Any:
        '''simple docstring'''
        snake_case_ : Dict = WavaVecaProcessor.from_pretrained(lowerCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(lowerCAmelCase__ , "test-processor-org" ) , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token , organization="valid_org" , )
        snake_case_ : List[str] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
        for k, v in processor.feature_extractor.__dict__.items():
            self.assertEqual(lowerCAmelCase__ , getattr(new_processor.feature_extractor , lowerCAmelCase__ ) )
        self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def _A ( self :List[Any] ) -> List[str]:
        '''simple docstring'''
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        snake_case_ : Tuple = CustomFeatureExtractor.from_pretrained(lowerCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case_ : int = os.path.join(lowerCAmelCase__ , "vocab.txt" )
            with open(lowerCAmelCase__ , "w" , encoding="utf-8" ) as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
            snake_case_ : List[str] = CustomTokenizer(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = CustomProcessor(lowerCAmelCase__ , lowerCAmelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
            snake_case_ : Optional[Any] = Repository(lowerCAmelCase__ , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
            processor.save_pretrained(lowerCAmelCase__ )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    "AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(lowerCAmelCase__ , "tokenizer_config.json" ) ) as f:
                snake_case_ : Tuple = json.load(lowerCAmelCase__ )
            self.assertDictEqual(
                tokenizer_config["auto_map"] , {
                    "AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
                    "AutoProcessor": "custom_processing.CustomProcessor",
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase__ , "custom_feature_extraction.py" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase__ , "custom_tokenization.py" ) ) )
            self.assertTrue(os.path.isfile(os.path.join(lowerCAmelCase__ , "custom_processing.py" ) ) )
            repo.push_to_hub()
        snake_case_ : List[Any] = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=lowerCAmelCase__ )
        # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
        self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and checkpoint->config-url map for the ESM models.
# NOTE(review): both constants were obfuscated to the same name ``__lowerCamelCase``
# (the second binding shadows the first); upstream they are ``logger`` and
# ``ESM_PRETRAINED_CONFIG_ARCHIVE_MAP`` — confirm, since ``logger`` is read below.
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
    '''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
    """Configuration for ESM models, optionally carrying an ``EsmFoldConfig`` for
    ESMFold folding checkpoints.

    NOTE(review): de-obfuscated reconstruction — the chunk's ``__init__`` repeated
    the parameter name ``lowerCAmelCase__`` (a SyntaxError) and bound attributes to
    a local ``snake_case_``; ``to_dict`` returned the undefined name ``output``.
    Parameter/attribute names are restored from the attribute read sites and the
    upstream ``EsmConfig`` — confirm against transformers' configuration_esm.py.
    """

    model_type = "esm"  # restored from the obfuscated ``a__ = '''esm'''``

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.0_2,
        layer_norm_eps=1E-1_2,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        """Store the transformer hyper-parameters; build folding config if requested."""
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        # The PyTorch port of ESMFold cannot use ESM attention maps.
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested folding config if present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class A_ :
    """ESMFold-specific sub-configuration (hangs off ``EsmConfig.esmfold_config``).

    NOTE(review): de-obfuscated reconstruction — the chunk collapsed every field
    name onto ``a__`` (leaving the dataclass with a single field), renamed
    ``__post_init__`` away, and ``to_dict`` returned the undefined name ``output``.
    Field names/defaults restored from upstream configuration_esm.py — confirm.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm_head: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        """Coerce ``trunk`` (None or a dict) into a TrunkConfig instance."""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested trunk config."""
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class A_ :
    """Folding-trunk sub-configuration with post-init shape validation.

    NOTE(review): de-obfuscated reconstruction — field names were collapsed onto
    ``a__`` and local names in ``__post_init__``/``to_dict`` were destroyed;
    restored from the F-string read sites and upstream configuration_esm.py.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: int = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        """Coerce ``structure_module`` and validate head/width consistency."""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): the next two checks compare a value against itself (x % x is
        # always 0), so they can never fire; upstream transformers carries the same
        # vacuous checks, so they are preserved rather than "fixed" here.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested structure-module config."""
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class A_ :
    """Structure-module sub-configuration for the ESMFold trunk.

    NOTE(review): de-obfuscated reconstruction — all field names were collapsed
    onto ``a__``; names/defaults restored from upstream configuration_esm.py.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict(self):
        """Serialize to a plain dict (no nested configs at this level)."""
        return asdict(self )
def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple of strings.

    Order matters: a token's index in this tuple is its token id — 4 special
    tokens, 25 residue/ambiguity codes, '.', '-', '<null_1>', '<mask>'.
    """
    # Fix: this function was defined under an obfuscated name while its call site
    # (EsmConfig.__init__) uses ``get_default_vocab_list``; define under the real
    # name and keep the old one as an alias below.
    return (
        "<cls>",
        "<pad>",
        "<eos>",
        "<unk>",
        "L",
        "A",
        "G",
        "V",
        "S",
        "E",
        "R",
        "T",
        "I",
        "D",
        "P",
        "K",
        "Q",
        "N",
        "F",
        "Y",
        "M",
        "H",
        "W",
        "C",
        "X",
        "B",
        "U",
        "Z",
        "O",
        ".",
        "-",
        "<null_1>",
        "<mask>",
    )


__UpperCAmelCase = get_default_vocab_list  # backward-compatible alias for the obfuscated name
| 653 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
    """Builds SwiftFormer configs and synthetic inputs for the model tests.

    NOTE(review): de-obfuscated — the chunk's ``__init__`` repeated the parameter
    name ``lowerCAmelCase__`` (a SyntaxError) and bound values to a local instead
    of ``self``; parameter/attribute names are restored from the attribute read
    sites below, and method names are restored to the names the sibling test
    class actually calls (``prepare_config_and_inputs`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],  # mutable defaults kept for upstream parity; treated as read-only
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) with freshly sampled tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Tiny SwiftFormer config mirroring the obfuscated original's keyword values."""
        return SwiftFormerConfig(
            depths=self.layer_depths ,
            embed_dims=self.embed_dims ,
            mlp_ratio=4 ,
            downsamples=[True, True, True, True] ,
            hidden_act="gelu" ,
            num_labels=self.num_labels ,
            down_patch_size=3 ,
            down_stride=2 ,
            down_pad=1 ,
            drop_rate=0.0 ,
            drop_path_rate=0.0 ,
            use_layer_scale=True ,  # the obfuscated call passed an undefined name here; upstream uses True
            layer_scale_init_value=1E-5 ,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the bare backbone and check the final feature-map shape."""
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Run the classification head with and without labels; check logits shape."""
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the form the common test mixin expects."""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    """Common test-suite wiring for SwiftFormer (bare model + classification head).

    NOTE(review): locals and class attributes below were mangled to
    `snake_case_` / `a__`, so many later references (`model`, `x`,
    `arg_names`, `inputs_dict`, `configs_no_init`, ...) are undefined as
    written; the intended bindings must be confirmed against the upstream
    SwiftFormer test module.
    """

    # Model classes and pipeline mapping, gated on torch availability.
    a__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    a__ = (
        {'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): these five flags all rebind the same mangled name `a__`;
    # upstream they are five distinct feature switches.
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _A ( self :List[str] ) -> int:
        """Create the model tester and the config tester."""
        snake_case_ : int = SwiftFormerModelTester(self )
        snake_case_ : List[str] = ConfigTester(
            self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )

    def _A ( self :Dict ) -> int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
    def _A ( self :Optional[int] ) -> Optional[int]:
        '''simple docstring'''
        pass

    def _A ( self :Any ) -> str:
        """Output embeddings are either absent or an nn.Linear head."""
        snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Any = model_class(lowerCAmelCase__ )
            snake_case_ : Any = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear ) )

    def _A ( self :List[str] ) -> Any:
        """The first positional argument of forward() must be `pixel_values`."""
        snake_case_, snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Tuple = model_class(lowerCAmelCase__ )
            snake_case_ : List[str] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Union[str, Any] = [*signature.parameters.keys()]
            snake_case_ : Dict = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )

    def _A ( self :List[Any] ) -> Optional[int]:
        """Exercise the bare model through the tester."""
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :str ) -> Tuple:
        """Exercise the image-classification head through the tester."""
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )

    @slow
    def _A ( self :Union[str, Any] ) -> Tuple:
        """Smoke-test loading the first pretrained checkpoint."""
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : Optional[int] = SwiftFormerModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )

    @unittest.skip(reason="SwiftFormer does not output attentions" )
    def _A ( self :int ) -> Union[str, Any]:
        '''simple docstring'''
        pass

    def _A ( self :Union[str, Any] ) -> int:
        """Check hidden-state count (8) and per-stage feature-map shapes."""
        def check_hidden_states_output(lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ):
            snake_case_ : Tuple = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : Dict = outputs.hidden_states
            # Expected number of hidden states for the tested config.
            snake_case_ : Dict = 8
            self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(lowerCAmelCase__ ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )

        snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Dict = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            snake_case_ : List[str] = True
            check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

    def _A ( self :Union[str, Any] ) -> Tuple:
        """With every init range zeroed, trainable weights must be exactly 0.0 or 1.0."""
        def _config_zero_init(lowerCAmelCase__ :str ):
            # Recursively set every *_range / *_std / initializer_factor /
            # layer_scale config field to (near) zero on a deep copy.
            snake_case_ : Optional[int] = copy.deepcopy(lowerCAmelCase__ )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(lowerCAmelCase__ , lowerCAmelCase__ , 1E-1_0 )
                if isinstance(getattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ ):
                    snake_case_ : int = _config_zero_init(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) )
                    setattr(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
            return configs_no_init

        snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : int = _config_zero_init(lowerCAmelCase__ )
        for model_class in self.all_model_classes:
            snake_case_ : Union[str, Any] = model_class(config=lowerCAmelCase__ )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        pass
def __UpperCAmelCase ( )-> Tuple:
    """Load the COCO sample image used by the integration tests.

    Fix: the original assigned the opened image to a throwaway name and then
    returned the undefined name `image` (NameError); bind and return the same
    object instead.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class A_ (unittest.TestCase ):
    """Slow integration test: run the pretrained swiftformer-xs checkpoint on a
    sample image and compare the first three logits against recorded values.

    NOTE(review): locals were mangled to `snake_case_`, so the later uses of
    `model` and `outputs` are undefined as written, and `lowerCAmelCase__` in
    the `.to(...)` calls presumably stood for `torch_device` — confirm.
    """

    @cached_property
    def _A ( self :Optional[int] ) -> Any:
        """Image processor for the checkpoint, or None without vision deps."""
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None

    @slow
    def _A ( self :Union[str, Any] ) -> List[str]:
        """End-to-end logits check on the COCO sample image."""
        snake_case_ : List[str] = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[int] = self.default_image_processor
        snake_case_ : Dict = prepare_img()
        snake_case_ : int = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
        # forward pass
        with torch.no_grad():
            snake_case_ : Dict = model(**lowerCAmelCase__ )
        # verify the logits
        snake_case_ : List[str] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
        snake_case_ : Dict = torch.tensor([[-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0]] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 653 |
'''Lazy-import structure for the Longformer model family (configuration,
tokenizers, PyTorch and TensorFlow modelling code), following the standard
transformers ``__init__`` pattern.'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Import structure: submodule name -> list of public symbols it provides.
# NOTE(review): every assignment below rebinds the same mangled name
# `__lowerCamelCase`; upstream these build up one `_import_structure` dict
# (the later blocks add keys to it). As written, each block overwrites the
# previous binding and the final `_import_structure` reference is undefined —
# confirm against the upstream longformer __init__.py.
__lowerCamelCase : Any = {
    '''configuration_longformer''': [
        '''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''LongformerConfig''',
        '''LongformerOnnxConfig''',
    ],
    '''tokenization_longformer''': ['''LongformerTokenizer'''],
}

# Fast tokenizer symbols, only when the `tokenizers` package is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Any = ['''LongformerTokenizerFast''']

# PyTorch model symbols, only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = [
        '''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LongformerForMaskedLM''',
        '''LongformerForMultipleChoice''',
        '''LongformerForQuestionAnswering''',
        '''LongformerForSequenceClassification''',
        '''LongformerForTokenClassification''',
        '''LongformerModel''',
        '''LongformerPreTrainedModel''',
        '''LongformerSelfAttention''',
    ]

# TensorFlow model symbols, only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Any = [
        '''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFLongformerForMaskedLM''',
        '''TFLongformerForMultipleChoice''',
        '''TFLongformerForQuestionAnswering''',
        '''TFLongformerForSequenceClassification''',
        '''TFLongformerForTokenClassification''',
        '''TFLongformerModel''',
        '''TFLongformerPreTrainedModel''',
        '''TFLongformerSelfAttention''',
    ]

# Static analysers / type checkers get the real imports; at runtime the module
# is replaced by a _LazyModule that resolves symbols on first access.
if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    # NOTE(review): upstream assigns `sys.modules[__name__] = _LazyModule(...)`
    # here; the bound name and the `_import_structure` argument look mangled.
    __lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this file.
__lowerCamelCase : str = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
# NOTE(review): both assignments rebind the same mangled name; upstream these
# are the logger and GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm.
__lowerCamelCase : Tuple = {
    '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration class for GPT-BigCode models (``gpt_bigcode`` architecture).

    Fixes applied: the original ``__init__`` repeated a single mangled
    parameter name nineteen times (a SyntaxError) while the body's right-hand
    sides recorded the intended names, and the three class attributes were all
    bound to ``a__`` (so only the last survived); the distinct names are
    restored below. ``attribute_map`` mirrors GPT-2 naming
    (e.g. ``hidden_size`` -> ``n_embd``).
    """

    model_type = '''gpt_bigcode'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.0_2,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fpaa=True,
        scale_attention_softmax_in_fpaa=True,
        multi_query=True,
        **kwargs,
    ):
        """Store the hyper-parameters; token ids are also forwarded to the base class."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        # n_inner is the FFN width; None means the model's default (4 * n_embd upstream).
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        # multi_query=True is the GPT-BigCode multi-query-attention variant.
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Absolute tolerance used by the integration-test tensor comparisons below.
# NOTE(review): upstream this constant is named TOLERANCE — confirm.
__lowerCamelCase : Optional[int] = 1E-4
if is_torch_available():
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
    """Generates Autoformer configs and synthetic input batches for the tests.

    NOTE(review): ``__init__``'s parameters all share the mangled name
    `lowerCAmelCase__` (a SyntaxError as written) and every local/attribute
    assignment was mangled to `snake_case_`; the right-hand-side names record
    the intended attributes — confirm against the upstream Autoformer tests.
    """

    def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
        """Record the hyper-parameters used to build configs and inputs."""
        snake_case_ : List[str] = d_model
        snake_case_ : Dict = parent
        snake_case_ : Optional[Any] = batch_size
        snake_case_ : Optional[Any] = prediction_length
        snake_case_ : str = context_length
        snake_case_ : Tuple = cardinality
        snake_case_ : List[str] = num_time_features
        snake_case_ : Optional[Any] = lags_sequence
        snake_case_ : Union[str, Any] = embedding_dimension
        snake_case_ : Optional[Any] = is_training
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : Any = num_hidden_layers
        snake_case_ : Optional[Any] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : Any = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Dict = attention_probs_dropout_prob
        snake_case_ : List[str] = context_length
        # Decoder sequence length = prediction_length + label_length.
        snake_case_ : Any = prediction_length + label_length
        snake_case_ : Union[str, Any] = label_length
        snake_case_ : List[Any] = moving_average
        snake_case_ : str = autocorrelation_factor

    def _A ( self :List[Any] ) -> Any:
        """Build an AutoformerConfig from the recorded hyper-parameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
        """Build a synthetic batch of past/future values, time features and masks."""
        # Past window must cover the context plus the largest lag offset.
        snake_case_ : Any = config.context_length + max(config.lags_sequence )
        snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
        snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
        snake_case_ : int = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def _A ( self :Dict ) -> Tuple:
        """Return (config, inputs_dict)."""
        snake_case_ : str = self.get_config()
        snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
        return config, inputs_dict

    def _A ( self :Optional[int] ) -> Dict:
        """Alias used by the common tests."""
        snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
        """Round-trip encoder and decoder through save/from_pretrained and compare
        their standalone outputs with the full model's (tolerance 1e-3)."""
        snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
        snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
        snake_case_ : Any = outputs.encoder_last_hidden_state
        snake_case_ : Dict = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Optional[Any] = model.get_encoder()
            encoder.save_pretrained(lowerCAmelCase__ )
            snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ )
        snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        # Encoder input: context slice of the transformer inputs + features.
        snake_case_ : List[Any] = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        # Decoder trend init: per-series mean repeated over the prediction horizon.
        snake_case_ : Any = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        snake_case_ : List[str] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        snake_case_ : Optional[Any] = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        snake_case_ : Any = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : List[Any] = model.get_decoder()
            decoder.save_pretrained(lowerCAmelCase__ )
            snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        snake_case_ : Tuple = decoder(
            trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    """Common test-suite for AutoformerModel / AutoformerForPrediction.

    NOTE(review): class attributes and locals were mangled to `a__` /
    `snake_case_`, so many later references (`model`, `info`, `outputs`,
    `attentions`, ...) are undefined as written — confirm against the
    upstream Autoformer test module.
    """

    a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    a__ = (AutoformerForPrediction,) if is_torch_available() else ()
    a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
    # NOTE(review): six distinct upstream feature switches all rebound to `a__`.
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _A ( self :Dict ) -> int:
        """Create the model tester and config tester."""
        snake_case_ : Tuple = AutoformerModelTester(self )
        snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )

    def _A ( self :List[str] ) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _A ( self :List[Any] ) -> Union[str, Any]:
        """save_pretrained/from_pretrained round-trip must report no missing keys."""
        snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCAmelCase__ )
                snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
            self.assertEqual(info["missing_keys"] , [] )

    def _A ( self :Optional[int] ) -> Tuple:
        """Standalone encoder/decoder must match the full model's outputs."""
        snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )

    @unittest.skip(reason="Model has no tokens embeddings" )
    def _A ( self :str ) -> str:
        '''simple docstring'''
        pass

    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        """main_input_name must match the first forward() argument after self."""
        snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) )
        # The main input is the name of the argument after `self`
        snake_case_ : Dict = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )

    def _A ( self :Optional[Any] ) -> Optional[int]:
        """Check the expected ordered forward-argument names for each class."""
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Tuple = model_class(lowerCAmelCase__ )
            snake_case_ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Optional[Any] = [*signature.parameters.keys()]
            snake_case_ : Dict = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask" )
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ] )
            self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )

    def _A ( self :int ) -> Any:
        """Check encoder/decoder/cross attention counts, shapes, and output length."""
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Union[str, Any] = True
        snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
        snake_case_ : Optional[int] = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            snake_case_ : Any = True
            snake_case_ : Any = False
            snake_case_ : Dict = True
            snake_case_ : List[str] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case_ : Optional[int] = True
            snake_case_ : Any = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : str = outputs.encoder_attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            snake_case_ : Tuple = len(lowerCAmelCase__ )
            # Expected number of output fields, adjusted for the optional ones.
            snake_case_ : List[str] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
            # decoder attentions
            snake_case_ : Optional[int] = outputs.decoder_attentions
            self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            snake_case_ : List[Any] = outputs.cross_attentions
            self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        snake_case_ : Optional[int] = True
        snake_case_ : List[Any] = True
        snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        with torch.no_grad():
            snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
        self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
        snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def _A ( self :Any ) -> Optional[Any]:
        """Flaky upstream; delegate to the mixin's retain-grad check."""
        super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
    """Download a cached Autoformer test batch from the Hub and torch.load it.

    Fix: the original bound both intermediate results to a throwaway name and
    then returned the undefined name `batch` (NameError), and passed the
    mangled argument as its own `map_location`; bind the downloaded path and
    the loaded batch explicitly. NOTE(review): upstream maps the tensors onto
    `torch_device` — confirm that is the intended map_location here.
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" )
    batch = torch.load(file ,map_location=torch_device )
    return batch
@require_torch
@slow
class A_ (unittest.TestCase ):
    """Slow integration tests against the pretrained tourism-monthly checkpoint.

    NOTE(review): locals were mangled to `snake_case_`, so `model`, `batch`,
    `output(s)` and `mean_prediction` below are undefined as written;
    `lowerCAmelCase__` in the `.to(...)` / `device=` / `atol=` positions
    presumably stood for `torch_device` and the module-level tolerance —
    confirm against the upstream test file.
    """

    def _A ( self :str ) -> Any:
        """Full forward pass on a train batch; compare head of the output tensor."""
        snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : List[str] = prepare_batch()
        with torch.no_grad():
            snake_case_ : int = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        snake_case_ : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : Optional[Any] = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )

    def _A ( self :Any ) -> str:
        """Encoder-only pass on a validation batch; compare head of the hidden state."""
        snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            snake_case_ : Tuple = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : Any = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )

    def _A ( self :List[str] ) -> Any:
        """Generation: sample sequences and compare the mean of the last 3 steps."""
        snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : str = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            snake_case_ : Optional[Any] = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
        snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def __UpperCAmelCase ( vectors , noofclusters )-> Optional[int]:
    """K-Means clustering implemented with the TensorFlow 1.x graph API.

    Args:
        vectors: sequence of equal-length numeric vectors to cluster.
        noofclusters: number of clusters k; must be < len(vectors).

    Returns:
        (centroids, assignments): the final centroid locations and, for each
        input vector, the index of the cluster it was assigned to.

    Fixes applied: the original signature repeated one mangled parameter name
    (a SyntaxError) and every local was rebound to `snake_case_`, leaving the
    later references (`centroids`, `cent_assigns`, `vect`, ...) undefined; the
    intended names below are restored from those in-function references.
    NOTE(review): this uses pre-1.0 TF APIs (tf.Session, tf.placeholder,
    tf.sub, tf.initialize_all_variables) and will not run on TF 2.x as-is.
    """
    noofclusters = int(noofclusters )
    assert noofclusters < len(vectors )
    # Find out the dimensionality
    dim = len(vectors[0] )
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors ) ) )
    shuffle(vector_indices )
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]] ) for i in range(noofclusters )
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64" ,[dim] )
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid ,centroid_value ) )
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0 ) for i in range(len(vectors ) )]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32" )
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment ,assignment_value ) )
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float" ,[None, dim] )
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input ,0 )
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder("float" ,[dim] )
        vb = tf.placeholder("float" ,[dim] )
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(va ,vb ) ,2 ) ) )
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float" ,[noofclusters] )
        cluster_assignment = tf.argmin(centroid_distances ,0 )
        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()
        # Initialize all variables
        sess.run(init_op )
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations ):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors ) ):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist ,feed_dict={va: vect, vb: sess.run(centroid )} )
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment ,feed_dict={centroid_distances: distances} )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n] ,feed_dict={assignment_value: assignment} )
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters ):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors ) )
                    if sess.run(assignments[i] ) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op ,feed_dict={mean_input: array(assigned_vects )} )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n] ,feed_dict={centroid_value: new_location} )
        # Return centroids and assignments
        centroids = sess.run(centroids )
        assignments = sess.run(assignments )
        return centroids, assignments
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
    """Tokenization tests for the slow (Python) and fast (Rust) RoBERTa tokenizers.

    Builds a tiny BPE vocabulary on disk in ``setUp`` and exercises encoding,
    special-token handling, prefix-space behaviour and offset mappings.
    """
    a__ = RobertaTokenizer
    a__ = RobertaTokenizerFast
    a__ = True
    a__ = {'''cls_token''': '''<s>'''}
    def _A ( self :Optional[int] ) -> List[Any]:
        """Write a toy BPE vocab file and merges file into the temp dir used by the tests."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : List[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case_ : int = {"unk_token": "<unk>"}
        snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )
    def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
        """Instantiate the slow tokenizer from the temp dir, merging in the special-tokens map."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
    def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
        """Instantiate the fast (Rust-backed) tokenizer from the temp dir."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
    def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
        """Return an (input_text, expected_output_text) pair for the shared round-trip tests."""
        snake_case_ : int = "lower newer"
        snake_case_ : Tuple = "lower newer"
        return input_text, output_text
    def _A ( self :Tuple ) -> Union[str, Any]:
        """Tokenize a phrase against the toy vocab and check the tokens and their ids."""
        snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ : Dict = "lower newer"
        snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokens + [tokenizer.unk_token]
        snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def _A ( self :Any ) -> str:
        """Spot-check the exact token ids produced for two reference sentences."""
        snake_case_ : List[str] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
    @slow
    def _A ( self :str ) -> List[str]:
        """Check build_inputs_with_special_tokens matches encode() for single and paired inputs."""
        snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
        snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(
            "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
        snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def _A ( self :List[Any] ) -> Any:
        """Verify prefix-space handling of encode() and spacing around added special tokens."""
        snake_case_ : Optional[Any] = self.get_tokenizer()
        snake_case_ : Tuple = "Encode this sequence."
        snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        # Testing spaces after special tokens
        snake_case_ : List[Any] = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
        snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
        snake_case_ : List[str] = "Encode <mask> sequence"
        snake_case_ : List[Any] = "Encode <mask>sequence"
        snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : int = encoded.index(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Tuple ) -> Tuple:
        """Intentionally a no-op here; the shared behaviour is covered by the common tokenizer tests."""
        pass
    def _A ( self :int ) -> Optional[Any]:
        """Compare slow vs fast tokenizers on a sentence containing an embedded mask token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : Any = "A, <mask> AllenNLP sentence."
                snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def _A ( self :int ) -> Tuple:
        """Check add_prefix_space / trim_offsets survive in the serialized fast-tokenizer state."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
            snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
    def _A ( self :List[str] ) -> List[Any]:
        """Exercise offset mappings under every add_prefix_space / trim_offsets combination."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : str = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Tuple = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                # encoding.offset_mapping[1],
                # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
| 653 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__lowerCamelCase : Any = logging.get_logger(__name__)
# Name of the SentencePiece model file expected inside a checkpoint directory.
__lowerCamelCase : List[Any] = {'''vocab_file''': '''spiece.model'''}
# Canonical download locations of the vocab files for the known checkpoint(s).
__lowerCamelCase : int = {
    '''vocab_file''': {
        '''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
    }
}
class A_ (a_ ):
    """SentencePiece-based tokenizer for CPM (Chinese Pre-trained Model).

    Wraps a SentencePiece model and runs jieba word segmentation before
    encoding; spaces and newlines are mapped to the placeholder characters
    \u2582 / \u2583 and restored in ``_decode``.
    """
    def __init__( self :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :Optional[int]="<s>" , lowerCAmelCase__ :Tuple="</s>" , lowerCAmelCase__ :Tuple="<unk>" , lowerCAmelCase__ :Optional[Any]="<sep>" , lowerCAmelCase__ :Union[str, Any]="<pad>" , lowerCAmelCase__ :List[Any]="<cls>" , lowerCAmelCase__ :int="<mask>" , lowerCAmelCase__ :Optional[int]=["<eop>", "<eod>"] , lowerCAmelCase__ :Optional[Dict[str, Any]] = None , **lowerCAmelCase__ :Any , ) -> None:
        """Load the SentencePiece model and set up jieba plus the whitespace translation table.

        Raises an (augmented) ModuleNotFoundError when jieba is not installed.
        """
        snake_case_ : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
        snake_case_ : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase__ , )
        snake_case_ : List[str] = 3
        snake_case_ : Optional[Any] = do_lower_case
        snake_case_ : Dict = remove_space
        snake_case_ : Any = keep_accents
        snake_case_ : List[str] = vocab_file
        snake_case_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(lowerCAmelCase__ )
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        snake_case_ : Union[str, Any] = jieba
        snake_case_ : str = str.maketrans(" \n" , "\u2582\u2583" )
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def _A ( self :Tuple ) -> Optional[int]:
        """Size of the SentencePiece vocabulary."""
        return len(self.sp_model )
    def _A ( self :Tuple ) -> Tuple:
        """Return the full vocabulary (including added tokens) as a token->id dict."""
        snake_case_ : Dict = {self.convert_ids_to_tokens(lowerCAmelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self :Optional[Any] ) -> Any:
        """Drop the (unpicklable) SentencePiece processor when pickling."""
        snake_case_ : Dict = self.__dict__.copy()
        snake_case_ : Optional[Any] = None
        return state
    def __setstate__( self :Optional[Any] , lowerCAmelCase__ :Optional[int] ) -> List[Any]:
        """Restore state and reload the SentencePiece model from ``vocab_file``."""
        snake_case_ : List[str] = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            snake_case_ : int = {}
        snake_case_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _A ( self :Tuple , lowerCAmelCase__ :Any ) -> Optional[int]:
        """Normalize raw text (whitespace, quotes, accents, case) before tokenization."""
        if self.remove_space:
            snake_case_ : List[Any] = " ".join(inputs.strip().split() )
        else:
            snake_case_ : Any = inputs
        snake_case_ : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            snake_case_ : Optional[Any] = unicodedata.normalize("NFKD" , lowerCAmelCase__ )
            snake_case_ : Union[str, Any] = "".join([c for c in outputs if not unicodedata.combining(lowerCAmelCase__ )] )
        if self.do_lower_case:
            snake_case_ : List[str] = outputs.lower()
        return outputs
    def _A ( self :str , lowerCAmelCase__ :str ) -> List[str]:
        """Tokenize with SentencePiece, re-splitting pieces that end in ',<digit>'."""
        snake_case_ : Tuple = self.preprocess_text(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = self.sp_model.encode(lowerCAmelCase__ , out_type=lowerCAmelCase__ )
        snake_case_ : Optional[int] = []
        for piece in pieces:
            if len(lowerCAmelCase__ ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                snake_case_ : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(lowerCAmelCase__ , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        snake_case_ : str = cur_pieces[1:]
                    else:
                        snake_case_ : Union[str, Any] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(lowerCAmelCase__ )
            else:
                new_pieces.append(lowerCAmelCase__ )
        return new_pieces
    def _A ( self :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Dict:
        """Convert a token (piece) to its SentencePiece id."""
        return self.sp_model.PieceToId(lowerCAmelCase__ )
    def _A ( self :int , lowerCAmelCase__ :Union[str, Any] ) -> List[Any]:
        """Convert a SentencePiece id back to its token (piece)."""
        return self.sp_model.IdToPiece(lowerCAmelCase__ )
    def _A ( self :List[Any] , lowerCAmelCase__ :Any ) -> Optional[Any]:
        """Join pieces into a single string, replacing the underline marker with spaces."""
        snake_case_ : List[Any] = "".join(lowerCAmelCase__ ).replace(lowerCAmelCase__ , " " ).strip()
        return out_string
    def _A ( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
        """Build model inputs: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>`` (XLNet layout)."""
        snake_case_ : Dict = [self.sep_token_id]
        snake_case_ : Tuple = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None , lowerCAmelCase__ :bool = False ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 at sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
        if token_ids_a is not None:
            return ([0] * len(lowerCAmelCase__ )) + [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1]
        return ([0] * len(lowerCAmelCase__ )) + [1, 1]
    def _A ( self :int , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]:
        """Return token type ids: segment 0 / 1 per sequence, with 2 for the trailing cls."""
        snake_case_ : Optional[int] = [self.sep_token_id]
        snake_case_ : str = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def _A ( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]:
        """Save the SentencePiece model file into ``save_directory`` and return its path."""
        if not os.path.isdir(lowerCAmelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        snake_case_ : int = os.path.join(
            lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , lowerCAmelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(lowerCAmelCase__ , "wb" ) as fi:
                snake_case_ : Any = self.sp_model.serialized_model_proto()
                fi.write(lowerCAmelCase__ )
        return (out_vocab_file,)
    def _A ( self :Union[str, Any] , *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :str ) -> Union[str, Any]:
        """Decode ids, then restore spaces/newlines from their placeholder characters."""
        snake_case_ : Dict = super()._decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
        snake_case_ : str = text.replace(" " , "" ).replace("\u2582" , " " ).replace("\u2583" , "\n" )
        return text
| 653 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when ``positive_integer`` is a *perfect* partition count.

    k is perfect exactly when ``sqrt(4*k + 1) / 2 + 1 / 2`` is a power of
    two, which we detect by checking that its base-2 logarithm is integral.
    """
    # NOTE: the original body referenced the undefined name `exponent` and the
    # two functions in this file were both named `__UpperCAmelCase` while their
    # call sites used `check_partition_perfect` / `solution`; names restored.
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 1_2345) -> int:
    """Project Euler 207: smallest k whose proportion of perfect partitions
    first drops below ``max_proportion``.

    Valid candidates k are exactly the integers of the form
    ``(i**2 - 1) / 4`` for integer i >= 3; walk them in order, counting how
    many are perfect, and return the first k where
    perfect / total < max_proportion.
    """
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return partition_candidate
        integer += 1
if __name__ == "__main__":
    print(f'''{solution() = }''')
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
# Canonical download location of the config for the known RoCBert checkpoint.
__lowerCamelCase : Dict = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration for RoCBert, a robust Chinese BERT variant that augments
    the standard word embeddings with character *shape* and *pronunciation*
    embeddings.

    Defaults reproduce the ``weiweishi/roc-bert-base-zh`` architecture.
    """

    a__ = '''roc_bert'''

    # NOTE: in the previous version every parameter of __init__ carried the
    # same name (a SyntaxError) and the body assigned throwaway locals from
    # undefined names; the parameter names below are restored to match the
    # attribute assignments in the body.
    def __init__(
        self,
        vocab_size=30_522,           # size of the token vocabulary
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-1_2,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,   # add pronunciation-embedding channel
        enable_shape=True,           # add glyph/shape-embedding channel
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,           # concatenate (vs sum) the extra embeddings
        **kwargs,
    ):
        """Store the architecture hyper-parameters and forward the rest to the base config."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class Tracker:
    """Runs one forward pass through ``module`` and records every *leaf*
    sub-module that executed, in execution order.

    A module counts as a leaf when it has no children, or when it is a
    Conv2d / BatchNorm2d (which timm sometimes wraps).
    """

    # NOTE: previously this class was named `A_` (but referenced as `Tracker`
    # below), its dataclass fields were obfuscated away, the hook method did
    # not match the `self._forward_hook` reference, and `nn.Convad` /
    # `nn.BatchNormad` do not exist; all restored.
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        """Forward hook: record ``m`` when it is a leaf operation."""
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        """Hook every sub-module, run ``x`` through the model, then detach the hooks."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        """The traced leaves that actually carry learnable state."""
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies the weights of ``src`` into ``dest`` by tracing both models on
    the same input and zipping their parametrized operations pairwise.

    ``src_skip`` / ``dest_skip`` list layer *types* to exclude on either side.
    """

    # NOTE: previously this class was named `A_` (but referenced as
    # `ModuleTransfer` later in the file) and its dataclass fields were
    # obfuscated away; restored.
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Transfer state dicts operation-by-operation using input ``x``.

        Raises:
            Exception: when the two models expose a different number of
                parametrized operations after filtering.
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # drop the layer types the caller asked to skip
        src_traced = list(filter(lambda x: type(x) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda x: type(x) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Load the timm checkpoint ``name``, copy its weights into a fresh HF
    ``ResNetForImageClassification`` built from ``config``, sanity-check the
    logits, and optionally push model + image processor to the hub.

    NOTE: the previous version gave all four parameters the same name (a
    SyntaxError) while the body referenced `name`, `save_directory`,
    `checkpoint_name`, ...; restored to match.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    # The ported model must reproduce the original logits exactly.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named ResNet (or, when ``model_name`` is None, every known
    size) from timm to the HF format and optionally push it to the hub.

    Returns the last ``ResNetConfig`` used together with the expected logits
    shape ``(1, num_labels)``.

    NOTE: the previous version gave all three parameters the same name (a
    SyntaxError) and left ``config`` unbound on the single-model path before
    the final return; both fixed.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    # Fetch the ImageNet-1k label mapping shipped on the hub.
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    __lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    __lowerCamelCase : Tuple = parser.parse_args()
    __lowerCamelCase : Path = args.pytorch_dump_folder_path
    # Make sure the output directory exists before converting.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class A_ :
"""simple docstring"""
def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :int=False , lowerCAmelCase__ :Any=10 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :int=32 * 4 , lowerCAmelCase__ :Any=32 * 6 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Any=32 , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Any = parent
snake_case_ : Tuple = batch_size
snake_case_ : List[Any] = is_training
snake_case_ : Optional[Any] = use_auxiliary_loss
snake_case_ : str = num_queries
snake_case_ : List[Any] = num_channels
snake_case_ : Tuple = min_size
snake_case_ : List[str] = max_size
snake_case_ : List[Any] = num_labels
snake_case_ : Optional[int] = mask_feature_size
def _A ( self :Tuple ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase__ )
snake_case_ : Union[str, Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase__ )
snake_case_ : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase__ ) > 0.5
).float()
snake_case_ : Tuple = (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase__ ) > 0.5).long()
snake_case_ : List[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def _A ( self :Any ) -> int:
'''simple docstring'''
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Tuple = self.prepare_config_and_inputs()
snake_case_ : List[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _A ( self :Optional[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> List[str]:
'''simple docstring'''
snake_case_ : List[str] = output.encoder_hidden_states
snake_case_ : Tuple = output.pixel_decoder_hidden_states
snake_case_ : List[str] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase__ ) , config.decoder_config.decoder_layers )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> List[str]:
'''simple docstring'''
with torch.no_grad():
snake_case_ : Optional[Any] = MaskFormerModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
snake_case_ : Optional[int] = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
snake_case_ : Optional[Any] = model(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Optional[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = MaskFormerForInstanceSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
def comm_check_on_output(lowerCAmelCase__ :Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
snake_case_ : Optional[int] = model(pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ )
snake_case_ : Any = model(lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
snake_case_ : Tuple = model(
pixel_values=lowerCAmelCase__ , pixel_mask=lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
comm_check_on_output(lowerCAmelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
"""simple docstring"""
a__ = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
a__ = (
{'''feature-extraction''': MaskFormerModel, '''image-segmentation''': MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
a__ = False
def _A ( self :Optional[Any] ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = MaskFormerModelTester(self )
snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _A ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
snake_case_, snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def _A ( self :Optional[int] ) -> Dict:
'''simple docstring'''
snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase__ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def _A ( self :Any ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def _A ( self :int ) -> int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _A ( self :List[Any] ) -> str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self :Union[str, Any] ) -> Dict:
'''simple docstring'''
pass
def _A ( self :int ) -> Optional[Any]:
'''simple docstring'''
snake_case_, snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : int = model_class(lowerCAmelCase__ )
snake_case_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : Optional[int] = [*signature.parameters.keys()]
snake_case_ : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
@slow
def _A ( self :Tuple ) -> str:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
snake_case_ : Dict = MaskFormerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _A ( self :Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = (self.model_tester.min_size,) * 2
snake_case_ : Optional[int] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCAmelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCAmelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCAmelCase__ ).long(),
}
snake_case_ : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase__ )
snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
def _A ( self :Optional[Any] ) -> Any:
'''simple docstring'''
snake_case_, snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase__ , **lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
def _A ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
snake_case_, snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Dict = model_class(lowerCAmelCase__ ).to(lowerCAmelCase__ )
snake_case_ : Dict = model(**lowerCAmelCase__ , output_attentions=lowerCAmelCase__ )
self.assertTrue(outputs.attentions is not None )
def _A ( self :Tuple ) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
snake_case_ : Optional[int] = self.all_model_classes[1]
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
snake_case_ : Dict = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
snake_case_ : List[str] = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).loss
loss.backward()
def _A ( self :Tuple ) -> List[str]:
'''simple docstring'''
snake_case_ : Optional[Any] = self.all_model_classes[1]
snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : int = self.model_tester.prepare_config_and_inputs()
snake_case_ : Any = True
snake_case_ : Optional[Any] = True
snake_case_ : Optional[int] = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
snake_case_ : int = model(lowerCAmelCase__ , mask_labels=lowerCAmelCase__ , class_labels=lowerCAmelCase__ )
snake_case_ : Dict = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
snake_case_ : str = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
snake_case_ : Tuple = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
snake_case_ : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowerCamelCase : str = 1E-4
def prepare_img():
    """Load the COCO "two cats" fixture image used by the integration tests below.

    Fixes the original, which bound the opened image to a throwaway name and
    then returned the undefined variable ``image``; also restores the name
    ``prepare_img`` that the test methods call.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class A_ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def _A ( self :Dict ) -> List[Any]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : int = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(lowerCAmelCase__ )
snake_case_ : Tuple = self.default_image_processor
snake_case_ : List[Any] = prepare_img()
snake_case_ : Optional[int] = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
snake_case_ : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
snake_case_ : str = model(**lowerCAmelCase__ )
snake_case_ : Dict = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
snake_case_ : Optional[Any] = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
snake_case_ : int = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(lowerCAmelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : Any = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(lowerCAmelCase__ )
.eval()
)
snake_case_ : List[Any] = self.default_image_processor
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : str = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
snake_case_ : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
snake_case_ : Optional[Any] = model(**lowerCAmelCase__ )
# masks_queries_logits
snake_case_ : Tuple = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ : Optional[Any] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
snake_case_ : Dict = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
snake_case_ : Union[str, Any] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ : Dict = torch.tensor(
[
[1.6_5_1_2E0_0, -5.2_5_7_2E0_0, -3.3_5_1_9E0_0],
[3.6_1_6_9E-0_2, -5.9_0_2_5E0_0, -2.9_3_1_3E0_0],
[1.0_7_6_6E-0_4, -7.7_6_3_0E0_0, -5.1_2_6_3E0_0],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
snake_case_ : Union[str, Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(lowerCAmelCase__ )
.eval()
)
snake_case_ : str = self.default_image_processor
snake_case_ : Dict = prepare_img()
snake_case_ : str = image_processor(lowerCAmelCase__ , return_tensors="pt" ).to(lowerCAmelCase__ )
snake_case_ : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase__ , (1, 3, 800, 1_088) )
with torch.no_grad():
snake_case_ : Optional[Any] = model(**lowerCAmelCase__ )
# masks_queries_logits
snake_case_ : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
snake_case_ : List[Any] = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
snake_case_ : Dict = torch.tensor(lowerCAmelCase__ ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
# class_queries_logits
snake_case_ : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
snake_case_ : List[Any] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )
def _A ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(lowerCAmelCase__ )
.eval()
)
snake_case_ : int = self.default_image_processor
snake_case_ : List[Any] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
snake_case_ : int = inputs["pixel_values"].to(lowerCAmelCase__ )
snake_case_ : Tuple = [el.to(lowerCAmelCase__ ) for el in inputs["mask_labels"]]
snake_case_ : Dict = [el.to(lowerCAmelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
snake_case_ : Tuple = model(**lowerCAmelCase__ )
self.assertTrue(outputs.loss is not None )
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
    """
    Configuration class for the RoCBert model.

    Mirrors BERT's configuration and adds RoCBert-specific fields for the
    pronunciation and shape embeddings used for robust Chinese input.
    Fixes the original ``__init__``, whose parameters were all named
    ``lowerCAmelCase__`` (a duplicate-argument SyntaxError); the restored
    names and defaults match the body assignments and the upstream config.
    """

    a__ = '''roc_bert'''  # model_type identifier

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Store the standard BERT hyper-parameters plus the RoCBert
        pronunciation/shape embedding sizes; ``pad_token_id`` is forwarded
        to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 653 | 1 |
'''simple docstring'''
def jaro_winkler(str1: str, str2: str) -> float:
    """
    Return the Jaro–Winkler similarity of two strings, in [0, 1].

    Fixes the original, which was a SyntaxError (both parameters named
    ``__magic_name__``) and whose transposition/prefix loops unpacked both
    pair elements into the same name (``ca != ca`` is always False, so
    transpositions were always 0).

    >>> jaro_winkler("hello", "world")
    0.4666666666666666
    >>> jaro_winkler("hello", "hello")
    1.0
    """

    def get_matched_characters(_str1: str, _str2: str) -> str:
        # Characters of _str1 that also occur in _str2 within the matching
        # window (half the shorter string's length on either side).
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, char in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if char in _str2[left:right]:
                matched.append(char)
                # blank out the matched character so it cannot match twice
                _str2 = f"{_str2[0:_str2.index(char)]} {_str2[_str2.index(char) + 1:]}"
        return "".join(matched)

    # matching characters, seen from each side
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transpositions: matched characters appearing in a different order
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # Winkler bonus: common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(jaro_winkler('''hello''', '''world'''))
| 653 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down_approch(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Side length of the largest all-ones square in ``mat``, by plain
    (exponential-time) recursion over right/diagonal/down neighbours.

    >>> largest_square_area_in_matrix_top_down_approch(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: outside the matrix contributes nothing.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # A square ending here extends the smallest of the three neighbours.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # one-element list so the nested function can update the running maximum
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_approch_with_dp(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Same recursion as above, memoised in ``dp_array`` (O(rows*cols)).

    >>> largest_square_area_in_matrix_top_down_approch_with_dp(2, 2, [[1, 1], [1, 1]])
    2
    """

    def update_area_of_max_square_using_dp_array(
        row: int, col: int, dp_array: list[list[int]]
    ) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        # memoise the zero case too, otherwise zero-heavy inputs stay exponential
        dp_array[row][col] = 0
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Iterative bottom-up DP with a full (rows+1) x (cols+1) table.

    >>> largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])
    2
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """
    Bottom-up DP keeping only the current and next rows (O(cols) space).

    Fixes the original, which aliased ``next_row = current_row`` — so later
    rows read values already overwritten in the same pass (e.g. it returned
    2 for [[1, 1], [1, 0]] instead of 1). A copy is taken instead.

    >>> largest_square_area_in_matrix_bottom_up_space_optimization(2, 2, [[1, 1], [1, 1]])
    2
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # copy, not alias: every cell of current_row is rewritten next pass
        next_row = current_row[:]
    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    """Return ``i + 1``; defined at module level so worker processes can pickle it."""
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input():
    """`parallel_backend` sets the backend name and rejects unknown backends.

    Fixes the original, whose helper returned the undefined name ``i`` and
    whose ``pytest.raises`` lost its expected exception type (ValueError is
    what `parallel_backend` raises for an unsupported backend).
    """
    with parallel_backend("spark"):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend"):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc", [2, -1])
def test_parallel_backend_map_nested(num_proc):
    """`map_nested` maps over lists, dicts and nested containers under the spark backend."""
    s1 = [1, 2]
    s2 = {"a": 1, "b": 2}
    s3 = {"a": [1, 2], "b": [3, 4]}
    s4 = {"a": {"1": 1}, "b": 2}
    s5 = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {"a": 2, "b": 3}
    expected_map_nested_s3 = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_s4 = {"a": {"1": 2}, "b": 3}
    expected_map_nested_s5 = {"a": 2, "b": 3, "c": 4, "d": 5}

    with parallel_backend("spark"):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the ``num_runs`` most recent runs of the scheduled daily CI workflow.

    ``token`` is an optional GitHub API token, sent as a Bearer authorization
    header when provided. Restores the real function names: the original
    defined every helper as ``__UpperCAmelCase`` while the bodies called
    ``get_daily_ci_runs`` etc., which were therefore undefined.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into ``output_dir``."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the imported helper's parameter name
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the named artifacts and return their text contents.

    Returns ``{artifact_name: {member_filename: text}}`` for every artifact
    zip found in ``output_dir``. Fixes the original, which decoded each zip
    member into a throwaway local instead of storing it in ``results``.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
| 653 | 1 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield consecutive ``size``-length tuples from ``seq`` (last chunk may be shorter)."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext for Playfair: upper-case it, drop non-letters,
    insert an ``X`` between doubled letters, and pad with ``X`` to an even
    length so it splits cleanly into digraphs.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 Playfair table for ``key`` as a flat 25-character list."""
    # I and J share a cell, so J is omitted from the alphabet.
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the right of each (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            # same column: take the letter below each (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair ``ciphertext`` produced with the same ``key``."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            # same row: take the letter to the left of each (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            # same column: take the letter above each (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap the columns back
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
| 653 |
'''simple docstring'''
from string import ascii_uppercase
__lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = len(__magic_name__ )
snake_case_ : str = 0
while True:
if x == i:
snake_case_ : List[str] = 0
if len(__magic_name__ ) == len(__magic_name__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : str = ""
snake_case_ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = ""
snake_case_ : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( )-> None:
    """Demo driver: round-trip "THE GERMAN ATTACK" through the cipher.

    NOTE(review): obfuscation broke this body — ``generate_key``,
    ``cipher_text``, ``original_text``, ``__magic_name__`` and ``s`` are all
    unresolved here (the defs above are named ``__UpperCAmelCase`` and every
    local is bound to ``snake_case_``).  Confirm intended names before use.
    """
    snake_case_ : List[str] = "THE GERMAN ATTACK"
    snake_case_ : List[str] = "SECRET"
    snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ )
    snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ )
    print(F'''Encrypted Text = {s}''' )
    print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is unresolved — the entry point above is named
    # `__UpperCAmelCase` in this file.
    main()
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table for the ViT model family: submodule name -> exported
# symbols.  Optional backends (vision / torch / tf / flax) only register
# their submodules when the dependency is importable.
# Fix: the original never defined `_import_structure` (every assignment was
# collapsed onto one obfuscated name), so the `_LazyModule(...)` call at the
# bottom raised NameError.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends are only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCAmelCase ( model ,ckpt_dir ,model_name )-> None:
    """Export a PyTorch BERT ``model`` as a TF1 checkpoint under ``ckpt_dir``.

    Fixes the obfuscated original, which gave all three parameters the same
    name (a SyntaxError) and collapsed every local binding onto one name.

    :param model: BertModel whose ``state_dict`` is exported
    :param ckpt_dir: directory that will receive ``<model_name>.ckpt``
    :param model_name: used (with ``-`` -> ``_``) as the checkpoint basename
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir ):
        os.makedirs(ckpt_dir )
    state_dict = model.state_dict()

    def to_tf_var_name(name ):
        # Translate a PyTorch state-dict key into the TF1 BERT variable name.
        for patt, repl in iter(var_map ):
            name = name.replace(patt ,repl )
        return F'''bert/{name}'''

    def create_tf_var(tensor ,name ,session ):
        # Allocate a zero-initialised TF variable with matching dtype/shape.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype )
        tf_var = tf.get_variable(dtype=tf_dtype ,shape=tensor.shape ,name=name ,initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(tf_var )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name )
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                # TF stores kernels transposed relative to PyTorch Linear.
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor ,name=tf_name ,session=session )
            tf.keras.backend.set_value(tf_var ,torch_tensor )
            tf_tensor = session.run(tf_var )
            print(F'''Successfully created {tf_name}: {np.allclose(tf_tensor ,torch_tensor )}''' )
        saver = tf.train.Saver(tf.trainable_variables() )
        saver.save(session ,os.path.join(ckpt_dir ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> None:
    """CLI entry point: load a PyTorch BERT checkpoint and export it to TF1.

    :param __magic_name__: optional argv list forwarded to ``parse_args``
        (``None`` means use ``sys.argv``).

    Fix: the obfuscated original passed its ``None`` default as every
    ``type=``/``required=`` argument; the intended ``str``/``True``/``False``
    values are restored.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name" ,type=str ,required=True ,help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" ,type=str ,default=None ,required=False ,help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" ,type=str ,required=True ,help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" ,type=str ,required=True ,help="Directory in which to save tensorflow model" )
    args = parser.parse_args(__magic_name__ )
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
    # NOTE(review): the converter above is named `__UpperCAmelCase` in this
    # file, so `convert_pytorch_checkpoint_to_tf` is unresolved here —
    # confirm the intended target before running.
    convert_pytorch_checkpoint_to_tf(model=model ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
main()
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = len(__magic_name__ ) + 1
snake_case_ : str = len(__magic_name__ ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
snake_case_ : Union[str, Any] = [[0 for i in range(__magic_name__ )] for j in range(__magic_name__ )]
# since string of zero length match pattern of zero length
snake_case_ : Any = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 ,__magic_name__ ):
snake_case_ : str = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 ,__magic_name__ ):
snake_case_ : Tuple = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 ,__magic_name__ ):
for j in range(1 ,__magic_name__ ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
snake_case_ : Any = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
snake_case_ : Union[str, Any] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
snake_case_ : List[str] = dp[i - 1][j]
else:
snake_case_ : str = 0
else:
snake_case_ : Any = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Inputting the strings interactively (disabled in favour of fixed demo
    # values below):
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # NOTE(review): obfuscation broke this demo — both constants are bound to
    # `__lowerCamelCase` (second clobbers first), while the code below reads
    # `input_string`/`pattern`, and `match_pattern` does not exist (the
    # matcher above is named `__UpperCAmelCase`).  Confirm intended names.
    __lowerCamelCase : Union[str, Any] = '''aab'''
    __lowerCamelCase : Any = '''c*a*b'''
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f'''{input_string} matches the given pattern {pattern}''')
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''')
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
    """Hash table with deque buckets (separate chaining, newest-first).

    NOTE(review): the base class is bound to the unresolved name ``a_`` in
    this file (originally a hash-table base); the methods below rely on its
    ``values``, ``_keys``, ``charge_factor`` and ``size_table`` attributes —
    confirm against the base class.

    Fixes vs. the obfuscated original: the three methods were all named
    ``_A`` (clobbering each other), ``_set_value`` bound its new deque to a
    throwaway local and then called ``appendleft`` on ``None``, and
    ``_collision_resolution`` counted occurrences of ``key`` instead of
    empty (``None``) slots.
    """

    def __init__( self , *args , **kwargs ) -> None:
        super().__init__(*args , **kwargs )

    def _set_value( self , key , data ) -> None:
        """Prepend ``data`` to the bucket for ``key``, creating it lazily."""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]

    def balanced_factor( self ):
        """Average remaining capacity per bucket, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution( self , key , data=None ):
        """Keep chaining into ``key`` until it is full and no slot is empty."""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
| 653 | 1 |
'''simple docstring'''
import qiskit
def __UpperCAmelCase ( qubits = 2 )-> qiskit.result.counts.Counts:
    """Simulate a GHZ-style entangled state over ``qubits`` qubits.

    Applies H on qubit 0, then chains CNOTs qubit-to-qubit, measures all
    qubits, and returns the simulator counts (1000 shots).

    Fixes vs. the obfuscated original: the parameter was renamed away from
    ``qubits`` even though the body reads ``qubits``, and each CNOT targeted
    the qubit *count* (out of range) instead of qubit ``i``.
    """
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits ,classical_bits )
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0 )
    for i in range(1 ,qubits ):
        # Entangle each qubit with its predecessor.
        circuit.cx(i - 1 ,i )
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits ) ) ,list(range(classical_bits ) ) )
    # Measuring any one qubit now collapses the others into the same state.
    job = qiskit.execute(circuit ,simulator ,shots=1000 )
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
# Type variables for the generic hash map below.  Fix: the original bound
# both to the same obfuscated name, leaving `KEY`/`VAL` (used by
# `Generic[KEY, VAL]` and the annotations below) undefined.
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True , slots=True )
class A_ (Generic[KEY, VAL] ):
    """Immutable key/value pair stored in one hash-map bucket.

    Fixes vs. the obfuscated original: the decorator arguments were the
    undefined name ``a_`` (restored to ``True``/``True``; ``slots=True``
    requires Python 3.10+), and the two typed fields had been replaced by
    placeholder ``a__ = 42`` assignments.
    """

    key: KEY
    val: VAL
class A_ (_Item ):
    """Falsy sentinel item marking a deleted (tombstoned) hash-map slot.

    NOTE(review): the base name ``_Item`` is unresolved in this file (the
    item class above is bound to ``A_``) — confirm the intended name.
    """

    def __init__( self ) -> None:
        # Fix: the original forwarded an undefined name twice; the sentinel
        # carries no real key/value, so pass None for both.
        super().__init__(None , None )

    def __bool__( self ) -> bool:
        # Always falsy so probe code treats tombstones like empty slots
        # when inserting.
        return False
# Shared tombstone sentinel used by the hash map below.  NOTE(review):
# `_DeletedItem` is unresolved here — the class directly above is bound to
# `A_` by the obfuscation, so this line raises NameError at import time, and
# the hash map reads the sentinel as `_deleted`, which is also never bound.
# Confirm the intended names.
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
    """Open-addressing hash map with linear probing and automatic resizing.

    Deleted slots are tombstoned with the module-level ``_deleted`` sentinel
    so probe chains stay intact.

    Fixes vs. the obfuscated original: every private helper was named ``_A``
    (so only the last survived and the dunder methods called undefined
    helpers), and the bucket writes in ``_try_set``/``_resize``/
    ``__delitem__`` were collapsed into throwaway locals, so the table was
    never actually mutated.
    """

    def __init__( self , initial_block_size: int = 8 , capacity_factor: float = 0.7_5 ) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index( self , key: KEY ) -> int:
        # Home slot for a key.
        return hash(key ) % len(self._buckets )

    def _get_next_ind( self , ind: int ) -> int:
        # Linear probing: next slot, wrapping around the table.
        return (ind + 1) % len(self._buckets )

    def _try_set( self , ind: int , key: KEY , val: VAL ) -> bool:
        """Try to place (key, val) at slot ``ind``; return True on success."""
        stored = self._buckets[ind]
        if not stored:
            # Empty or tombstoned slot (both are falsy): claim it.
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            # Overwrite an existing entry for the same key.
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False

    def _is_full( self ) -> bool:
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )

    def _is_sparse( self ) -> bool:
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit

    def _resize( self , new_size: int ) -> None:
        # Rehash every live item into a fresh bucket array.
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )

    def _size_up( self ) -> None:
        self._resize(len(self._buckets ) * 2 )

    def _size_down( self ) -> None:
        self._resize(len(self._buckets ) // 2 )

    def _iterate_buckets( self , key: KEY ) -> Iterator[int]:
        """Yield each slot in probe order, starting at the key's home slot."""
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )

    def _add_item( self , key: KEY , val: VAL ) -> None:
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break

    def __setitem__( self , key: KEY , val: VAL ) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key , val )

    def __delitem__( self , key: KEY ) -> None:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                # Tombstone (don't clear) so later probes keep walking.
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__( self , key: KEY ) -> VAL:
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )

    def __len__( self ) -> int:
        return self._len

    def __iter__( self ) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__( self ) -> str:
        val_string = " ,".join(
            F'''{item.key}: {item.val}''' for item in self._buckets if item )
        return F'''HashMap({val_string})'''
| 653 | 1 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
"""simple docstring"""
snake_case_ : Any = 0
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 3
while True:
snake_case_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__magic_name__ ):
snake_case_ : Optional[Any] = int(__magic_name__ )
total_partitions += 1
if check_partition_perfect(__magic_name__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__magic_name__ )
integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration class for GPT-BigCode (Santacoder-style) models.

    NOTE(review): the base class name ``a_`` is unresolved in this file
    (imports suggest ``PretrainedConfig``), and the three class attributes
    below are all bound to ``a__`` (each assignment clobbers the previous
    one) — originally the model type, inference-ignore keys, and attribute
    map; confirm the intended names.

    Fixes vs. the obfuscated original: every ``__init__`` parameter shared
    one name (a SyntaxError), and the attribute values were bound to
    throwaway locals instead of ``self``.
    """

    a__ = '''gpt_bigcode'''
    a__ = ['''past_key_values''']
    a__ = {
        '''hidden_size''': '''n_embd''',
        '''max_position_embeddings''': '''n_positions''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self ,
        vocab_size=50_257 ,
        n_positions=1_024 ,
        n_embd=768 ,
        n_layer=12 ,
        n_head=12 ,
        n_inner=None ,
        activation_function="gelu_pytorch_tanh" ,
        resid_pdrop=0.1 ,
        embd_pdrop=0.1 ,
        attn_pdrop=0.1 ,
        layer_norm_epsilon=1E-5 ,
        initializer_range=0.0_2 ,
        scale_attn_weights=True ,
        use_cache=True ,
        bos_token_id=50_256 ,
        eos_token_id=50_256 ,
        attention_softmax_in_fpaa=True ,
        scale_attention_softmax_in_fpaa=True ,
        multi_query=True ,
        **kwargs ,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        # NOTE(review): "fpaa" is the obfuscated spelling kept for in-file
        # consistency — presumably fp32 in the un-obfuscated original.
        self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
        self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 653 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class A_ :
"""simple docstring"""
@staticmethod
def _A ( *lowerCAmelCase__ :List[Any] , **lowerCAmelCase__ :Any ) -> Any:
'''simple docstring'''
pass
def __UpperCAmelCase ( __magic_name__ )-> str:
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCamelCase : Optional[int] = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class A_ (unittest.TestCase ):
"""simple docstring"""
a__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str ) -> Tuple:
'''simple docstring'''
snake_case_ : Any = pipeline(
"document-question-answering" , model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , image_processor=lowerCAmelCase__ )
snake_case_ : Dict = INVOICE_URL
snake_case_ : Optional[Any] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
snake_case_ : Any = "What is the placebo?"
snake_case_ : Tuple = [
{
"image": load_image(lowerCAmelCase__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] ) -> str:
'''simple docstring'''
snake_case_ : Any = dqa_pipeline(lowerCAmelCase__ , top_k=2 )
self.assertEqual(
lowerCAmelCase__ , [
[
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
{"score": ANY(lowerCAmelCase__ ), "answer": ANY(lowerCAmelCase__ ), "start": ANY(lowerCAmelCase__ ), "end": ANY(lowerCAmelCase__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self :str ) -> List[Any]:
'''simple docstring'''
snake_case_ : List[Any] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
snake_case_ : int = INVOICE_URL
snake_case_ : Optional[int] = "How many cats are there?"
snake_case_ : List[str] = [
{"score": 0.0_0_0_1, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0_0_0_1, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
snake_case_ : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
snake_case_ : str = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , lowerCAmelCase__ )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
snake_case_ : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
snake_case_ : str = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
# We can optionnally pass directly the words and bounding boxes
snake_case_ : Optional[int] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
snake_case_ : Optional[Any] = []
snake_case_ : Optional[int] = []
snake_case_ : Optional[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , words=lowerCAmelCase__ , boxes=lowerCAmelCase__ , top_k=2 )
self.assertEqual(lowerCAmelCase__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self :Any ) -> List[Any]:
'''simple docstring'''
snake_case_ : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
snake_case_ : List[str] = INVOICE_URL
snake_case_ : Tuple = "What is the invoice number?"
snake_case_ : Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ : Any = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self :str ) -> Any:
'''simple docstring'''
snake_case_ : Tuple = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
snake_case_ : Optional[Any] = INVOICE_URL
snake_case_ : str = "What is the invoice number?"
snake_case_ : Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ : Tuple = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ : Union[str, Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _A ( self :Optional[Any] ) -> str:
'''simple docstring'''
snake_case_ : str = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
snake_case_ : Optional[int] = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , )
snake_case_ : Optional[int] = INVOICE_URL
snake_case_ : Any = "What is the invoice number?"
snake_case_ : List[str] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
snake_case_ : Any = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
snake_case_ : int = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
snake_case_ : Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
snake_case_ : List[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _A ( self :Any ) -> Dict:
'''simple docstring'''
snake_case_ : Tuple = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowerCAmelCase__ )
snake_case_ : int = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowerCAmelCase__ , revision="3dc6de3" , max_seq_len=50 , )
snake_case_ : Tuple = INVOICE_URL
snake_case_ : int = "What is the invoice number?"
snake_case_ : Tuple = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
snake_case_ : Dict = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
snake_case_ : List[str] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , "" ) ) )
# This model should also work if `image` is set to None
snake_case_ : List[Any] = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _A ( self :int ) -> Dict:
'''simple docstring'''
snake_case_ : Union[str, Any] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
snake_case_ : Any = INVOICE_URL
snake_case_ : int = "What is the invoice number?"
snake_case_ : List[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _A ( self :Optional[Any] ) -> List[str]:
'''simple docstring'''
pass
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> None:
    """Dump the current git repo's id/sha/branch to ``<folder>/git_log.json``.

    :param __magic_name__: destination folder for ``git_log.json``.

    Fixes vs. the obfuscated original: the folder path was passed as the
    ``search_parent_directories`` flag (should be ``True``), and ``repo_id``
    stringified the folder instead of the repo object.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        "repo_id": str(repo ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
        json.dump(repo_infos ,f ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""simple docstring"""
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class A_ (a_ ):
    """Tokenization tests for DistilBERT, reusing the BERT tokenizer suite.

    Fixes: the three class attributes were all assigned to the same scrambled
    name ``a__`` (each overwriting the previous, so the base suite never saw
    them), and the test body asserted against ``text``/``text_a`` which were
    never bound.  Attribute and method names are restored to the ones the
    base suite (``a_`` — presumably BertTokenizationTest, confirm) consumes.
    """

    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        """`build_inputs_with_special_tokens` must wrap single and paired inputs in [CLS]/[SEP]."""
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )

        text = tokenizer.encode("sequence builders" , add_special_tokens=False )
        text_2 = tokenizer.encode("multi-sequence build" , add_special_tokens=False )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 653 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
    """Builds YolosImageProcessor settings and the expected resized output sizes.

    Fixes: the constructor declared thirteen parameters all named
    ``lowerCAmelCase__`` (a SyntaxError) and stored nothing on ``self``
    (every assignment went to a throwaway annotated local), and both methods
    were named ``_A`` so the second shadowed the first.  Names are restored
    from the call sites in the sibling test class
    (``prepare_image_processor_dict``, ``get_expected_values``, ``self.size`` …).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Defaults mirror the mangled signature's annotated defaults, in
        # declaration order.  NOTE(review): the list defaults are shared
        # across instances — harmless here since they are never mutated.
        self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize inputs to.

        Single image: scale so the short side hits ``size["shortest_edge"]``.
        Batched: per-image expectation, then the max over the batch (padding).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # array inputs are channel-first: (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
    """Integration tests for `YolosImageProcessor`: property presence, dict round-trip,
    PIL/numpy/torch preprocessing shapes, pad() equivalence, and COCO detection /
    panoptic annotation encoding against golden tensors.

    NOTE(review): this block appears machine-mangled — results are assigned to a
    throwaway local (`snake_case_`) but read back under their original names
    (`image_processing`, `image_processor`, `encoding`, `encoded_images`,
    `expected_height`, …), and several methods reference `lowerCAmelCase__`,
    which is not in scope.  Code is left byte-identical; the comments below only
    record what each method is intended to verify.
    """
    # Processor under test (None when torchvision/PIL are unavailable);
    # `a_` is presumably ImageProcessingSavingTestMixin — confirm.
    a__ = YolosImageProcessor if is_vision_available() else None
    def _A ( self :Optional[Any] ) -> str:
        '''setUp-equivalent: build the helper supplying the processor config.'''
        snake_case_ : int = YolosImageProcessingTester(self )
    @property
    def _A ( self :List[str] ) -> Any:
        '''Config dict forwarded to the image-processor constructor.'''
        return self.image_processor_tester.prepare_image_processor_dict()
    def _A ( self :List[str] ) -> Union[str, Any]:
        '''The processor must expose the standard preprocessing attributes.'''
        snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
        self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
    def _A ( self :List[Any] ) -> Any:
        '''from_dict honors defaults and explicit size/max_size/pad overrides.'''
        snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
        self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
        snake_case_ : Optional[int] = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
    def _A ( self :List[str] ) -> int:
        '''Intentionally skipped case from the shared mixin.'''
        pass
    def _A ( self :Optional[Any] ) -> Optional[Any]:
        '''PIL inputs: output pixel_values shapes match get_expected_values().'''
        snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , Image.Image )
        # Test not batched input
        snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def _A ( self :Dict ) -> Dict:
        '''numpy inputs: same shape checks as the PIL case.'''
        snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
        # Test not batched input
        snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
        snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def _A ( self :Tuple ) -> Tuple:
        '''torch.Tensor inputs: same shape checks as the PIL case.'''
        snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
        # Test not batched input
        snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
        snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def _A ( self :Tuple ) -> Dict:
        '''Calling pad() directly must equal calling the full processor with resize/normalize/rescale off.'''
        snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
        snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
        # create random PyTorch tensors
        snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
        # Test whether the method "pad" and calling the image processor return the same tensors
        snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
        snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
    @slow
    def _A ( self :str ) -> Any:
        '''COCO detection annotations encode to the documented golden tensors.'''
        snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case_ : int = json.loads(f.read() )
        snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
        # encode them
        snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
        snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
        snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
        # verify area
        snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
        # verify boxes
        snake_case_ : Optional[int] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
        snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : Dict = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
        # verify is_crowd
        snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
        # verify class_labels
        snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
        # verify orig_size
        snake_case_ : Any = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
        # verify size
        snake_case_ : List[Any] = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
    @slow
    def _A ( self :Dict ) -> int:
        '''COCO panoptic annotations (with masks) encode to the documented golden tensors.'''
        snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case_ : Optional[int] = json.loads(f.read() )
        snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
        snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
        snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
        # verify pixel values
        snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
        self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
        snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
        # verify area
        snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
        # verify boxes
        snake_case_ : Optional[int] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
        snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
        # verify image_id
        snake_case_ : List[str] = torch.tensor([39_769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
        # verify is_crowd
        snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
        # verify class_labels
        snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
        # verify masks
        snake_case_ : Any = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
        # verify orig_size
        snake_case_ : int = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
        # verify size
        snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 653 | 1 |
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
# Module-level logger; the pool-mapping helper below reports progress via `logger`,
# which the scrambled assignment to `__lowerCamelCase` never defined.
logger = logging.get_logger(__name__)
class A_ :
    """Process-wide holder for the joblib parallel-backend name.

    ``backend_name is None`` means "use the multiprocessing pool"; the
    ``parallel_backend`` context manager below sets it while active.
    """

    # The sibling helpers read/write `ParallelBackendConfig.backend_name`;
    # the scrambled `a__` alias is kept for backward compatibility.
    backend_name = None
    a__ = None
@experimental
def __UpperCAmelCase(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable`, via joblib or a process pool.

    Uses joblib when a backend was installed through the context manager
    below (``ParallelBackendConfig.backend_name`` is set), otherwise a plain
    multiprocessing Pool.

    Fix: the original signature declared seven parameters all named
    ``__magic_name__`` — a SyntaxError; names are restored from the
    positional uses in the call expressions below.
    NOTE(review): ``ParallelBackendConfig`` is referenced throughout this
    module but the config class above was renamed ``A_`` — confirm.
    """
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Split `iterable` into contiguous chunks and map over them in a process pool.

    Fixes: duplicate ``__magic_name__`` parameters (SyntaxError) and locals
    assigned to throwaway names while read under their real ones
    (``num_proc``, ``split_kwds``, ``div`` …); the def is renamed to the
    identifier the dispatcher above already calls.
    """
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    # share tqdm's lock with the workers so progress bars don't interleave
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    # flatten the per-process result lists back into one list
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Map `single_map_nested_func` over `iterable` with the active joblib backend.

    Fixes: duplicate ``__magic_name__`` parameters (SyntaxError); the def is
    renamed to the identifier the dispatcher above already calls, and the
    delayed callable is restored to ``single_map_nested_func``.
    """
    # joblib is imported lazily: it is only needed when a backend is installed
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def __UpperCAmelCase(__magic_name__):
    """Temporarily route the parallel map through the named joblib backend.

    Args:
        __magic_name__ (str): backend name, e.g. ``"spark"``.

    Fix: the body read ``backend_name`` (never bound — the parameter had been
    renamed) and assigned the backend to a throwaway local instead of
    ``ParallelBackendConfig.backend_name``, so the setting never took effect.
    """
    backend_name = __magic_name__  # readable alias for the mangled parameter
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()
    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        # always clear the backend, even if the with-body raised
        ParallelBackendConfig.backend_name = None
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
snake_case_ : Dict = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 653 | 1 |
'''simple docstring'''
import os
def solution() -> str:
    """Return the first ten digits of the sum of the integers in ``num.txt``.

    The file is looked up next to this module, one integer per line
    (Project Euler #13 style).

    Fixes: the body used ``__magic_name__`` although the function takes no
    parameters (restored to ``__file__`` and the loop variable ``line``),
    and the def is renamed to ``solution`` — the name the ``__main__``
    guard already calls.
    """
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# NOTE(review): both constants are bound to the same scrambled name
# (`__lowerCamelCase`), so 16 is immediately overwritten by 32, and nothing
# below reads either value (the batch size comes from config["batch_size"]).
# Upstream these look like MAX_GPU_BATCH_SIZE / EVAL_BATCH_SIZE — confirm
# before restoring.
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` (drives main_process_first and the
            TPU/mixed-precision padding decisions).
        batch_size (int): per-device batch size.

    Returns:
        (train_dataloader, eval_dataloader)

    Fixes: duplicate ``__magic_name__`` parameters (SyntaxError) and locals
    assigned to throwaway names; the def is renamed to ``get_dataloaders``,
    which the training loop below already calls.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
# NOTE(review): upstream this guard rebinds `get_dataloaders` to a mocked
# version when TESTING_MOCKED_DATALOADERS=1; here the mock is bound to the
# scrambled name `__lowerCamelCase`, so it never takes effect — confirm intent.
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    __lowerCamelCase : Optional[Any] = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Fine-tune BERT on GLUE MRPC, retrying with a smaller batch size on OOM.

    Args:
        config (dict): ``lr``, ``num_epochs``, ``seed``, ``batch_size``.
        args: parsed CLI namespace with ``cpu`` and ``mixed_precision``.

    Fixes: duplicate ``__magic_name__`` parameters (SyntaxError), locals
    assigned to throwaway names while read under their real ones, and the
    def renamed to ``training_function`` — the name ``main()`` already calls.
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse the CLI flags and launch `training_function`.

    Fixes: locals were assigned to throwaway names while the final call read
    ``training_function(config, args)`` with none of the three names bound;
    the def is renamed to ``main`` — the name the ``__main__`` guard calls.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 653 | 1 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Fixes: every constant below was bound to the scrambled name `__lowerCamelCase`
# while the code reads PATH_TO_TRANSFORMERS, spec, transformers, CONFIG_MAPPING,
# _re_checkpoint and CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK —
# names restored from those references.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
# NOTE(review): `load_module` is deprecated; consider `spec.loader.exec_module`.
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfigMixin",
    "DecisionTransformerConfigMixin",
    "EncoderDecoderConfigMixin",
    "RagConfigMixin",
    "SpeechEncoderDecoderConfigMixin",
    "VisionEncoderDecoderConfigMixin",
    "VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    """Raise ValueError if any config class docstring lacks a valid checkpoint link.

    A link is valid when ``[name](https://huggingface.co/name)`` is
    self-consistent.  Classes listed in
    CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK are exempt.

    Fixes: locals were assigned to throwaway names while read under their
    real ones; the def is renamed to the identifier the ``__main__`` guard
    already calls.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
    """Zero-shot text classification tool backed by bart-large-mnli.

    Fixes: seven class attributes were all bound to ``a__`` (each overwriting
    the previous) and all three methods were named ``_A`` (only the last
    survived).  Names are restored to the PipelineTool contract — the
    ``super().setup()`` call pins the first method's name; ``id2label`` is
    the standard config attribute (the original read ``idalabel``, which does
    not exist).  NOTE(review): attribute names below follow the upstream
    PipelineTool protocol — confirm against the ``a_`` base class.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load the model, then locate the "entailment" label id in its config."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, hypothesis) pairs — one "This example is {label}" per candidate."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Pick the label whose entailment logit (column 2) is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 653 | 1 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise logistic sigmoid, 1 / (1 + e^-x).

    >>> sigmoid(np.array([0.0]))
    array([0.5])
    """
    return 1 / (1 + np.exp(-vector))
def swish(vector: np.ndarray) -> np.ndarray:
    """
    Element-wise swish (SiLU) activation: x * sigmoid(x).

    The sigmoid is computed inline so this function is self-contained and does
    not depend on another module-level name.
    """
    return vector * (1 / (1 + np.exp(-vector)))
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Map each submodule to the public names it defines. _LazyModule uses this so
# heavy optional backends (vision / torch / tf / flax) are imported only on
# first attribute access.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real eager imports.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# Module-level logger used by the loading helpers below.
logger = logging.get_logger(__name__)

# fairseq state-dict key (substring) -> HF Wav2Vec2 module path.
# "*" is a placeholder for the encoder layer index, filled in at load time.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF attributes that live on the task model itself rather than under "wav2vec2.".
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    """
    Parse a label file into ``{line_number: first_token}``.

    Blank lines are skipped but still consume a line number. Used to build
    ``config.id2label`` for sequence-classification checkpoints.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """
    Copy fairseq tensor `value` into the HF module attribute addressed by the
    dotted path `key`, validating shapes first.

    `weight_type` selects which sub-tensor of the target module to write
    ("weight", "weight_g", "weight_v", "bias", "param" or None). `full_name`
    is the original fairseq key, used for PARAM_MAPPING lookups and logging.
    """
    # Walk down the dotted attribute path to the target module/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Some fairseq names (adapter weights) map to renamed HF sub-parameters.
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """
    Record `value` in `hf_dict` under the fully-qualified HF key derived from
    `key` + `weight_type` (dict-based variant of set_recursively, used when
    collecting a state dict instead of writing into a live model).
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    # lm_head tensors are stored whole; everything else keeps only the first slice.
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
# Adapter-layer parameter renames: fairseq short names -> HF sub-parameter paths.
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """
    Try to match one fairseq state-dict entry against MAPPING and, on a hit,
    write it into `hf_model` (via set_recursively) or into `hf_dict` (via
    rename_dict). Returns True if the entry was consumed.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        # Non top-level targets live under the "wav2vec2." submodule.
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # Recover the encoder layer index from the fairseq key.
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """
    Copy every tensor of `fairseq_model`'s state dict into `hf_model`,
    dispatching conv feature-extractor tensors to load_conv_layer and
    everything else to load_wavaveca_layer. Unmatched keys are logged.

    NOTE(review): `is_headless` is accepted for call-site compatibility but is
    not read in this body — confirm against upstream before removing.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """
    Copy one fairseq conv feature-extractor tensor into the matching HF conv
    layer. fairseq names look like "conv_layers.<layer_id>.<type_id>.*" where
    type_id 0 is the conv itself and type_id 2 a layer norm; anything else is
    recorded in `unused_weights`.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # With group norm only layer 0 carries a norm; with layer norm every layer does.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak a fairseq wav2vec2 checkpoint into the transformers design
    and save it (plus tokenizer/feature-extractor where applicable) to
    `pytorch_dump_folder_path`.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaForCTC(config)
    else:
        hf_wav2vec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    # A checkpoint is treated as CTC-fine-tuned unless explicitly flagged otherwise.
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds tiny LayoutLMv3 configs/inputs and runs shape checks for the test suite."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels)."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check output shapes for text+image, text-only and image-only inputs."""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check logits shape of the sequence-classification head."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check logits shape of the token-classification head (text tokens only)."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check start/end logits shapes of the question-answering head."""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Adapt prepare_config_and_inputs() output to the (config, inputs_dict) shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the LayoutLMv3 model family."""

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests require OCR inputs this tiny setup does not provide.
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Expand inputs for multiple-choice heads and add dummy labels when requested."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released microsoft/layoutlmv3-base weights."""

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False because the test supplies its own input_ids/bbox.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Map each submodule to its public names; _LazyModule defers the heavy
# torch/tf/flax imports until an attribute is actually accessed.
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real eager imports.
    from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_opt import (
            OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OPTForCausalLM,
            OPTForQuestionAnswering,
            OPTForSequenceClassification,
            OPTModel,
            OPTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int:  # picklable for multiprocessing
    """Return the input plus one.

    NOTE(review): the original returned `i + 1`, but the parameter is named
    `__magic_name__`, so every call raised NameError; fixed to use the parameter.
    """
    return __magic_name__ + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
    """Check that the joblib-spark backend registers under the name "spark" and
    that an unsupported backend name makes ``map_nested`` raise ``ValueError``.

    NOTE(review): the original passed the undefined module-level name
    `__magic_name__` to `pytest.raises` and as both arguments of `map_nested`
    (NameError at runtime); restored the intended exception type, map function
    and data list.
    """

    def add_one(i):  # local map function; the module-level helper is shadowed by later defs
        return i + 1

    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    lst = [1, 2, 3]
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one ,lst ,num_proc=2 )
    with pytest.raises(ValueError ):
        with parallel_backend("unsupported backend" ):
            map_nested(add_one ,lst ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( num_proc )-> List[Any]:
    """Check ``map_nested`` over flat lists, dicts and nested structures while the
    spark parallel backend is active, for both positive and negative num_proc.

    NOTE(review): the original rebound every structure to the single throwaway
    local `snake_case_`, compared each call against the undefined name
    `expected_map_nested_sa`, and passed the undefined `__magic_name__` as both
    the map function and the data.  Restored per-structure variables; the
    parameter must be named `num_proc` so that `pytest.mark.parametrize` above
    can inject it.
    """

    def add_one(i):  # local map function; the module-level helper is shadowed by later defs
        return i + 1

    sa = [1, 2]
    sa_dict = {"a": 1, "b": 2}
    sa_dict_of_lists = {"a": [1, 2], "b": [3, 4]}
    sa_nested_dict = {"a": {"1": 1}, "b": 2}
    sa_wide_dict = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_sa = [2, 3]
    expected_sa_dict = {"a": 2, "b": 3}
    expected_sa_dict_of_lists = {"a": [2, 3], "b": [4, 5]}
    expected_sa_nested_dict = {"a": {"1": 2}, "b": 3}
    expected_sa_wide_dict = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(add_one ,sa ,num_proc=num_proc ) == expected_sa
        assert map_nested(add_one ,sa_dict ,num_proc=num_proc ) == expected_sa_dict
        assert map_nested(add_one ,sa_dict_of_lists ,num_proc=num_proc ) == expected_sa_dict_of_lists
        assert map_nested(add_one ,sa_nested_dict ,num_proc=num_proc ) == expected_sa_nested_dict
        assert map_nested(add_one ,sa_wide_dict ,num_proc=num_proc ) == expected_sa_wide_dict
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( a ,b )-> str:
    """Return the bitwise OR of two non-negative integers as a ``"0b"``-prefixed
    binary string.

    NOTE(review): the original declared both parameters as `__magic_name__`
    (duplicate argument -- a SyntaxError) while the body read `a` and `b`, and
    the zfill width was bound to a throwaway local but read back as an
    undefined name; restored the names the body actually uses.

    >>> __UpperCAmelCase(25, 32)
    '0b111001'
    >>> __UpperCAmelCase(0, 0)
    '0b0'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]
    max_len = max(len(a_binary ) ,len(b_binary ) )
    # a result bit is 1 whenever either operand has a 1 in that position
    return "0b" + "".join(
        str(int("1" in (char_a, char_b) ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) ,b_binary.zfill(max_len ) ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (PretrainedConfig ):
    """ESM model configuration (optionally carrying an ESMFold folding config).

    NOTE(review): the original declared the base class as the undefined name
    `a_` (the file imports ``PretrainedConfig`` above, which the body's
    ``super().__init__``/``super().to_dict()`` calls require), declared ~20
    constructor parameters all named `lowerCAmelCase__` (duplicate argument --
    a SyntaxError), and bound every value to the throwaway local `snake_case_`
    instead of setting attributes on ``self``.  Restored the parameter/attribute
    names the body reads.
    """

    a__ = '''esm'''

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.0_2,
        layer_norm_eps=1E-1_2,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ) -> Any:
        super().__init__(pad_token_id=pad_token_id , mask_token_id=mask_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                esmfold_config = EsmFoldConfig(**esmfold_config )
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                # NOTE(review): `get_default_vocab_list` is referenced by the
                # original too, but the function later in this file is named
                # `__UpperCAmelCase` -- confirm the intended target.
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , False ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def to_dict(self ) -> List[Any]:
        """Serialize to a dict, expanding the nested folding config if present.

        NOTE(review): the original named this method `_A`, which broke the
        ``to_dict`` override implied by its ``super().to_dict()`` call, and
        dropped the expanded dict on the floor via `snake_case_`.
        """
        output = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Configuration of the ESMFold folding head.

    NOTE(review): the original named this class `A_`, declared every field as
    the same un-annotated name `a__` (each assignment shadowing the last, so no
    dataclass fields existed), and defined two methods both named `_A` (the
    second shadowing the first).  Restored field names matching the default
    values in declaration order, the ``__post_init__`` hook the first method
    clearly implements (it fills ``self.trunk``), and the ``to_dict`` name that
    callers in this file use (``self.esmfold_config.to_dict()``).
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self ) -> int:
        """Fill in / coerce the nested trunk configuration."""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )

    def to_dict(self ) -> Union[str, Any]:
        """Serialize to a dict, expanding the nested TrunkConfig."""
        output = asdict(self )
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Configuration of the ESMFold trunk.

    NOTE(review): the original named this class `A_`, declared every field as
    the same un-annotated name `a__` (no dataclass fields existed), named both
    methods `_A`, and bound the derived head counts to the throwaway local
    `snake_case_` while reading back the undefined names `sequence_num_heads` /
    `pairwise_num_heads` (NameError).  Restored field names matching the
    defaults in declaration order, the ``__post_init__`` hook, and the
    ``to_dict`` name used by callers (``self.trunk.to_dict()`` above).
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self ) -> int:
        """Fill nested defaults and validate the head/width constraints."""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): the next two self-modulo checks can never fire; kept
        # byte-for-byte because they match the original conditions.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def to_dict(self ) -> List[str]:
        """Serialize to a dict, expanding the nested StructureModuleConfig."""
        output = asdict(self )
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Configuration of the ESMFold structure module.

    NOTE(review): the original named this class `A_` and declared every field as
    the same un-annotated name `a__` -- without annotations none of them were
    dataclass fields at all, and each assignment shadowed the previous one.
    Restored field names matching the default values in declaration order, and
    renamed `_A` to `to_dict` for consistency with the ``.to_dict()`` call on
    ``structure_module`` made by the trunk config above.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1E-8
    inf: float = 1E5

    def to_dict(self ) -> Dict:
        """Serialize all fields to a plain dict."""
        return asdict(self )
def __UpperCAmelCase ( )-> int:
    """Return the default ESM-2 vocabulary as a tuple of token strings:
    four leading special tokens, the amino-acid alphabet, gap/dot symbols,
    and the trailing null/mask tokens."""
    leading_specials = ("<cls>", "<pad>", "<eos>", "<unk>")
    amino_acids = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    trailing_tokens = (".", "-", "<null_1>", "<mask>")
    return leading_specials + amino_acids + trailing_tokens
| 653 | 1 |
"""Lazy import structure for the Mask2Former model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

# NOTE(review): the original bound every piece of the import structure to the
# throwaway name `__lowerCamelCase`, so `_import_structure` (consumed by
# `_LazyModule` below) was never defined -- NameError on import.  Rebuilt it
# under the correct name and installed the lazy module into `sys.modules`.
# The TYPE_CHECKING imports also used garbled "maska" module/class names; they
# were aligned with the string names declared in `_import_structure`.
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 653 |
"""Lazy import structure for the Longformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# NOTE(review): the original bound every piece of the import structure to the
# throwaway name `__lowerCamelCase`, so `_import_structure` (consumed by
# `_LazyModule` below) was never defined -- NameError on import.  Rebuilt it
# under the correct name, assigning each backend-guarded sub-list into its
# module key, and installed the lazy module into `sys.modules`.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 1 |
"""Lazy import structure for the Transformer-XL model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# NOTE(review): the original bound every piece of the import structure to the
# throwaway name `__lowerCamelCase`, so `_import_structure` (consumed by
# `_LazyModule` below) was never defined -- NameError on import.  Rebuilt it
# under the correct name and installed the lazy module into `sys.modules`.
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
__lowerCamelCase : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
    """Builds an AutoformerConfig plus dummy encoder/decoder inputs for the model tests.

    NOTE(review): throughout this class, results are bound to the throwaway
    local `snake_case_` and the *intended* upstream names (`config`,
    `inputs_dict`, `transformer_inputs`, `feature`, ...) are then read while
    undefined -- NameError at runtime.  The `__init__` signature also declares
    every parameter as `lowerCAmelCase__` (duplicate argument: SyntaxError)
    while the body reads the lost upstream names (`d_model`, `batch_size`, ...).
    Flagged inline rather than rewritten, because the exact tensor plumbing
    cannot be reconstructed safely from this view alone.
    """

    def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
        """Record the test hyperparameters (batch size, lengths, model dims, ...).

        NOTE(review): each statement below binds to the local `snake_case_`
        instead of the intended `self.<attr>` -- nothing is stored on self.
        """
        snake_case_ : List[str] = d_model
        snake_case_ : Dict = parent
        snake_case_ : Optional[Any] = batch_size
        snake_case_ : Optional[Any] = prediction_length
        snake_case_ : str = context_length
        snake_case_ : Tuple = cardinality
        snake_case_ : List[str] = num_time_features
        snake_case_ : Optional[Any] = lags_sequence
        snake_case_ : Union[str, Any] = embedding_dimension
        snake_case_ : Optional[Any] = is_training
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : Any = num_hidden_layers
        snake_case_ : Optional[Any] = num_attention_heads
        snake_case_ : int = intermediate_size
        snake_case_ : Any = hidden_act
        snake_case_ : Union[str, Any] = hidden_dropout_prob
        snake_case_ : Dict = attention_probs_dropout_prob
        snake_case_ : List[str] = context_length
        snake_case_ : Any = prediction_length + label_length
        snake_case_ : Union[str, Any] = label_length
        snake_case_ : List[Any] = moving_average
        snake_case_ : str = autocorrelation_factor

    def _A ( self :List[Any] ) -> Any:
        """Build an AutoformerConfig from the tester's hyperparameters."""
        return AutoformerConfig(
            d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )

    def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
        """Create a dict of dummy past/future tensors shaped to match `config`.

        NOTE(review): `_past_length`, `past_values`, etc. are read after being
        bound to `snake_case_` -- undefined names at runtime.
        """
        snake_case_ : Any = config.context_length + max(config.lags_sequence )
        snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
        snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
        snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
        snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
        # decoder inputs
        snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
        snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
        snake_case_ : int = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def _A ( self :Dict ) -> Tuple:
        """Return (config, inputs_dict) for a fresh configuration."""
        snake_case_ : str = self.get_config()
        snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
        return config, inputs_dict

    def _A ( self :Optional[int] ) -> Dict:
        """Alias of prepare_config_and_inputs used by the common test mixin."""
        snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
        """Round-trip the encoder and decoder through save_pretrained/from_pretrained
        and check the standalone sub-modules reproduce the full model's outputs."""
        snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
        snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
        snake_case_ : Any = outputs.encoder_last_hidden_state
        snake_case_ : Dict = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : Optional[Any] = model.get_encoder()
            encoder.save_pretrained(lowerCAmelCase__ )
            snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        # NOTE(review): `transformer_inputs`, `feature`, `enc_input`, etc. below
        # are read after being bound to `snake_case_` -- undefined at runtime.
        snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ )
        snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        snake_case_ : List[Any] = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        snake_case_ : Any = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        snake_case_ : List[str] = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        snake_case_ : Optional[Any] = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        snake_case_ : Any = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case_ : List[Any] = model.get_decoder()
            decoder.save_pretrained(lowerCAmelCase__ )
            snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        snake_case_ : Tuple = decoder(
            trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    """Common model test-suite wiring for Autoformer.

    NOTE(review): both base classes are the undefined name `a_` (presumably
    ModelTesterMixin and PipelineTesterMixin, imported above -- confirm).
    All class attributes share the mangled name `a__`, so each assignment
    overwrites the previous one (upstream these were distinct flags such as
    all_model_classes / all_generative_model_classes / pipeline_model_mapping /
    test_pruning -- confirm against upstream).  As elsewhere in this file,
    locals bound to `snake_case_` are then read back under undefined upstream
    names (`model`, `info`, `config`, `inputs_dict`, ...).
    """

    a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    a__ = (AutoformerForPrediction,) if is_torch_available() else ()
    a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _A ( self :Dict ) -> int:
        """Set up the model tester and config tester.

        NOTE(review): `AutoformerModelTester` is undefined here (the helper
        class above is named `A_`), and `lowerCAmelCase__` is undefined.
        """
        snake_case_ : Tuple = AutoformerModelTester(self )
        snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )

    def _A ( self :List[str] ) -> Tuple:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def _A ( self :List[Any] ) -> Union[str, Any]:
        """Round-trip each model class through save_pretrained/from_pretrained and
        check that no weights go missing."""
        snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCAmelCase__ )
                snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
            self.assertEqual(info["missing_keys"] , [] )

    def _A ( self :Optional[int] ) -> Tuple:
        """Delegate to the tester's standalone encoder/decoder check."""
        snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )

    @unittest.skip(reason="Model has no tokens embeddings" )
    def _A ( self :str ) -> str:
        """Skipped: time-series model has no token embeddings to resize."""
        pass

    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        """Check the model's declared main_input_name matches its forward signature.

        NOTE(review): `getattr(lowerCAmelCase__, "forward")` targets an
        undefined name, and `model_signature` is read after being bound to
        `snake_case_`.
        """
        snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) )
        # The main input is the name of the argument after `self`
        snake_case_ : Dict = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )

    def _A ( self :Optional[Any] ) -> Optional[int]:
        """Check the forward signature exposes the expected argument names, in order."""
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Tuple = model_class(lowerCAmelCase__ )
            snake_case_ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Optional[Any] = [*signature.parameters.keys()]
            snake_case_ : Dict = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask" )
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ] )
            self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )

    def _A ( self :int ) -> Any:
        """Check encoder/decoder/cross attention outputs: count, shape, and that
        attentions stay last in the output tuple."""
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Union[str, Any] = True
        snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
        snake_case_ : Optional[int] = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            snake_case_ : Any = True
            snake_case_ : Any = False
            snake_case_ : Dict = True
            snake_case_ : List[str] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case_ : Optional[int] = True
            snake_case_ : Any = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : str = outputs.encoder_attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            snake_case_ : Tuple = len(lowerCAmelCase__ )
            snake_case_ : List[str] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
            # decoder attentions
            snake_case_ : Optional[int] = outputs.decoder_attentions
            self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            snake_case_ : List[Any] = outputs.cross_attentions
            self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
        # Check attention is always last and order is fine
        snake_case_ : Optional[int] = True
        snake_case_ : List[Any] = True
        snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        with torch.no_grad():
            snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
        self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
        snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
        self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
        self.assertListEqual(
            list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def _A ( self :Any ) -> Optional[Any]:
        """Delegate to the (flaky) common retain-grad check."""
        super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" )-> int:
    """Download a cached tourism-monthly batch fixture from the Hub and load it.

    NOTE(review): the original passed the *filename* string to ``torch.load``
    (instead of the downloaded path) and used it as ``map_location`` as well;
    fixed to load the downloaded file onto ``torch_device``.
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" ,filename=__magic_name__ ,repo_type="dataset" )
    batch = torch.load(file ,map_location=torch_device )
    return batch
@require_torch
@slow
class A_ (unittest.TestCase ):
    """Slow integration tests against the pretrained
    `huggingface/autoformer-tourism-monthly` checkpoint.

    NOTE(review): in every method, the model/batch/output are bound to the
    throwaway local `snake_case_` and then read back under the undefined
    upstream names (`model`, `batch`, `output`, `outputs`, ...); `lowerCAmelCase__`
    (used as device, expected shape and tolerance) and `prepare_batch` (the
    helper above is named `__UpperCAmelCase`) are also undefined -- NameError
    when run.  Flagged rather than rewritten: the expected tensor values must
    not be guessed at.
    """

    def _A ( self :str ) -> Any:
        """Forward pass of AutoformerModel on a train batch; check output shape
        and the first 3x3 slice of the last hidden state."""
        snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : List[str] = prepare_batch()
        with torch.no_grad():
            snake_case_ : int = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        snake_case_ : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : Optional[Any] = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )

    def _A ( self :Any ) -> str:
        """Forward pass of AutoformerForPrediction on a validation batch; check the
        encoder's last hidden state shape and first 3x3 slice."""
        snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            snake_case_ : Tuple = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : Any = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )

    def _A ( self :List[str] ) -> Any:
        """Generate forecasts on a validation batch; check the sampled sequences'
        shape and the mean prediction's last three values."""
        snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : str = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            snake_case_ : Optional[Any] = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
        snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
import torch
from diffusers import DiffusionPipeline
class A_ (a_ ):
    """Minimal one-step DiffusionPipeline used for custom-pipeline loading tests.

    NOTE(review): ``__init__`` declares both parameters under the same generated
    name ``lowerCAmelCase__`` (a SyntaxError as written; presumably ``unet`` and
    ``scheduler``), and ``__call__`` binds results to a throwaway ``snake_case_``
    while later lines read ``scheduler_output``/``result``.  Confirm against the
    original pipeline before running.
    """
    def __init__( self :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] ) -> str:
        '''Register the unet and scheduler sub-modules on the pipeline.'''
        super().__init__()
        self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
    def __call__( self :Optional[Any] ) -> Dict:
        '''Run a single denoising step on random noise and return an all-ones tensor of that shape.'''
        # Random sample shaped like one UNet input (batch of 1).
        snake_case_ : List[Any] = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , )
        snake_case_ : Tuple = 1
        snake_case_ : Optional[Any] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
        snake_case_ : int = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
        # x - x + ones == ones, while keeping the result connected to the scheduler output.
        snake_case_ : Optional[Any] = scheduler_output - scheduler_output + torch.ones_like(lowerCAmelCase__ )
        return result
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
    """RoBERTa slow/fast tokenizer tests over a tiny BPE vocabulary written to a temp dir.

    NOTE(review): this block was mangled by an automated rewrite -- locals are
    bound to a throwaway ``snake_case_`` while later lines read ``tokenizer``,
    ``tokens``, ``encoded`` etc., and several ``**lowerCAmelCase__`` parameters
    are read as ``kwargs`` in the body.  Confirm against the original test
    module before running.
    """
    a__ = RobertaTokenizer
    a__ = RobertaTokenizerFast
    a__ = True
    a__ = {'''cls_token''': '''<s>'''}
    def _A ( self :Optional[int] ) -> List[Any]:
        '''Write a toy BPE vocab and merges file into the test temp directory.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : List[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case_ : int = {"unk_token": "<unk>"}
        snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )
    def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
        '''Build a slow tokenizer from the temp dir (special tokens map applied).'''
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
    def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
        '''Build a fast tokenizer from the temp dir (special tokens map applied).'''
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
    def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
        '''Input/output text pair consumed by the shared tokenizer tests.'''
        snake_case_ : int = "lower newer"
        snake_case_ : Tuple = "lower newer"
        return input_text, output_text
    def _A ( self :Tuple ) -> Union[str, Any]:
        '''Tokenize "lower newer" with the slow tokenizer and check tokens and ids.'''
        snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ : Dict = "lower newer"
        snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ )  # , add_prefix_space=True)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokens + [tokenizer.unk_token]
        snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def _A ( self :Any ) -> str:
        '''Spot-check encode() ids against the pretrained roberta-base vocabulary.'''
        snake_case_ : List[str] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )
    @slow
    def _A ( self :str ) -> List[str]:
        '''build_inputs_with_special_tokens must agree with encode(add_special_tokens=True).'''
        snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
        snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(
            "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
        snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def _A ( self :List[Any] ) -> Any:
        '''Check add_prefix_space handling and spacing around added special tokens.'''
        snake_case_ : Optional[Any] = self.get_tokenizer()
        snake_case_ : Tuple = "Encode this sequence."
        # Byte-level BPE representation of a leading space.
        snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        # Testing spaces after special tokens
        snake_case_ : List[Any] = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} )  # mask token has a left space
        snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
        snake_case_ : List[str] = "Encode <mask> sequence"
        snake_case_ : List[Any] = "Encode <mask>sequence"
        snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : int = encoded.index(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Tuple ) -> Tuple:
        '''Intentionally a no-op in this suite.'''
        pass
    def _A ( self :int ) -> Optional[Any]:
        '''Slow and fast tokenizers must agree on token_type_ids/attention_mask and mask handling.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : Any = "A, <mask> AllenNLP sentence."
                snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
    def _A ( self :int ) -> Tuple:
        '''A fast tokenizer reloaded from disk must keep add_prefix_space/trim_offsets.'''
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
            snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )
    def _A ( self :List[str] ) -> List[Any]:
        '''Offset mappings must honour every add_prefix_space/trim_offsets combination.'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : str = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                # Same checks again, but with a leading space in the text.
                snake_case_ : Tuple = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
| 653 | 1 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
    """Derive the HF encoder (DonutSwin) and decoder (MBart) configs from an original DonutModel.

    Args:
        __magic_name__: the original ``DonutModel``; its ``config`` and decoder
            tokenizer determine the sizes of the two HF configs.

    Returns:
        Tuple ``(encoder_config, decoder_config)``.

    Fixes over the garbled original: results were bound to a throwaway
    ``snake_case_`` while the ``return`` read unbound names, and the MBart
    boolean flags received the model object itself.  The flag values below
    follow the upstream Donut conversion script -- confirm against it.
    """
    model = __magic_name__  # alias the generated parameter name for readability
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def __UpperCAmelCase ( __magic_name__ )-> Optional[Any]:
"""simple docstring"""
if "encoder.model" in name:
snake_case_ : Union[str, Any] = name.replace("encoder.model" ,"encoder" )
if "decoder.model" in name:
snake_case_ : Any = name.replace("decoder.model" ,"decoder" )
if "patch_embed.proj" in name:
snake_case_ : Optional[Any] = name.replace("patch_embed.proj" ,"embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case_ : List[str] = name.replace("patch_embed.norm" ,"embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
snake_case_ : Any = "encoder." + name
if "attn.proj" in name:
snake_case_ : str = name.replace("attn.proj" ,"attention.output.dense" )
if "attn" in name and "mask" not in name:
snake_case_ : str = name.replace("attn" ,"attention.self" )
if "norm1" in name:
snake_case_ : int = name.replace("norm1" ,"layernorm_before" )
if "norm2" in name:
snake_case_ : str = name.replace("norm2" ,"layernorm_after" )
if "mlp.fc1" in name:
snake_case_ : str = name.replace("mlp.fc1" ,"intermediate.dense" )
if "mlp.fc2" in name:
snake_case_ : List[str] = name.replace("mlp.fc2" ,"output.dense" )
if name == "encoder.norm.weight":
snake_case_ : Union[str, Any] = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
snake_case_ : Dict = "encoder.layernorm.bias"
return name
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Dict:
    """Rewrite an original Donut state dict into the HF layout (splitting fused qkv tensors).

    NOTE(review): mangled by an automated rewrite -- both parameters share one
    generated name (a SyntaxError as written; presumably ``orig_state_dict`` and
    ``model``) and results are bound to a throwaway ``snake_case_`` while later
    lines read ``val``, ``key_split``, ``layer_num``, ``block_num`` and ``dim``.
    Confirm against the original conversion script before running.
    """
    for key in orig_state_dict.copy().keys():
        snake_case_ : Union[str, Any] = orig_state_dict.pop(__magic_name__ )
        if "qkv" in key:
            # Fused qkv tensors are split into query/key/value thirds of size `dim`.
            snake_case_ : Optional[Any] = key.split("." )
            snake_case_ : str = int(key_split[3] )
            snake_case_ : Any = int(key_split[5] )
            snake_case_ : Dict = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                snake_case_ : Dict = val[:dim, :]
                snake_case_ : List[str] = val[dim : dim * 2, :]
                snake_case_ : str = val[-dim:, :]
            else:
                snake_case_ : Union[str, Any] = val[:dim]
                snake_case_ : List[Any] = val[dim : dim * 2]
                snake_case_ : Any = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            snake_case_ : str = val
    return orig_state_dict
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=None ,__magic_name__=False )-> List[str]:
    """Convert an original Donut checkpoint into a HF VisionEncoderDecoderModel, verify it, then save/push.

    NOTE(review): mangled by an automated rewrite -- all three parameters share
    one generated name (a SyntaxError as written; presumably ``model_name``,
    ``pytorch_dump_folder_path`` and ``push_to_hub``) and results are bound to a
    throwaway ``snake_case_`` while later lines read ``original_model``,
    ``model``, ``processor`` etc.  Confirm against the original script.
    """
    snake_case_ : Optional[int] = DonutModel.from_pretrained(__magic_name__ ).eval()
    # load HuggingFace model
    snake_case_, snake_case_ : str = get_configs(__magic_name__ )
    snake_case_ : Optional[int] = DonutSwinModel(__magic_name__ )
    snake_case_ : Any = MBartForCausalLM(__magic_name__ )
    snake_case_ : List[str] = VisionEncoderDecoderModel(encoder=__magic_name__ ,decoder=__magic_name__ )
    model.eval()
    snake_case_ : str = original_model.state_dict()
    snake_case_ : Optional[Any] = convert_state_dict(__magic_name__ ,__magic_name__ )
    model.load_state_dict(__magic_name__ )
    # verify results on scanned document
    snake_case_ : Union[str, Any] = load_dataset("hf-internal-testing/example-documents" )
    snake_case_ : str = dataset["test"][0]["image"].convert("RGB" )
    snake_case_ : Any = XLMRobertaTokenizerFast.from_pretrained(__magic_name__ ,from_slow=__magic_name__ )
    snake_case_ : List[str] = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis ,size=original_model.config.input_size[::-1] )
    snake_case_ : List[str] = DonutProcessor(__magic_name__ ,__magic_name__ )
    snake_case_ : List[Any] = processor(__magic_name__ ,return_tensors="pt" ).pixel_values
    # Pick the task prompt that matches the checkpoint being converted.
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        snake_case_ : Tuple = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        snake_case_ : List[Any] = "When is the coffee break?"
        snake_case_ : Any = task_prompt.replace("{user_input}" ,__magic_name__ )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        snake_case_ : Tuple = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        snake_case_ : Union[str, Any] = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        snake_case_ : List[str] = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        snake_case_ : Tuple = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        snake_case_ : Dict = "hello world"
    else:
        raise ValueError("Model name not supported" )
    snake_case_ : Optional[int] = original_model.decoder.tokenizer(__magic_name__ ,add_special_tokens=__magic_name__ ,return_tensors="pt" )[
        "input_ids"
    ]
    # verify patch embeddings
    snake_case_ : str = original_model.encoder.model.patch_embed(__magic_name__ )
    snake_case_, snake_case_ : Union[str, Any] = model.encoder.embeddings(__magic_name__ )
    assert torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
    # verify encoder hidden states
    snake_case_ : Optional[Any] = original_model.encoder(__magic_name__ )
    snake_case_ : Any = model.encoder(__magic_name__ ).last_hidden_state
    assert torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-2 )
    # verify decoder hidden states
    snake_case_ : Optional[int] = original_model(__magic_name__ ,__magic_name__ ,__magic_name__ ).logits
    snake_case_ : Union[str, Any] = model(__magic_name__ ,decoder_input_ids=__magic_name__ ).logits
    assert torch.allclose(__magic_name__ ,__magic_name__ ,atol=1E-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(__magic_name__ )
        processor.save_pretrained(__magic_name__ )
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/" )[-1] ,commit_message="Update model" )
        processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] ,commit_message="Update model" )
if __name__ == "__main__":
    # CLI entry point for the Donut -> HF conversion.
    # NOTE(review): the parser is bound to a throwaway ``__lowerCamelCase`` name
    # while later lines read ``parser``/``args``, and ``convert_donut_checkpoint``
    # is not defined under that name in this rewritten file -- confirm against
    # the original script.
    __lowerCamelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''naver-clova-ix/donut-base-finetuned-docvqa''',
        required=False,
        type=str,
        help='''Name of the original model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        required=False,
        type=str,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
        help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
    )
    __lowerCamelCase : Any = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 653 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
    """Return True iff ``__magic_name__`` is a "perfect" partition count.

    A candidate is perfect when ``sqrt(4*n + 1)/2 + 1/2`` is an exact power of
    two, i.e. its base-2 logarithm is integral.

    Fixes over the garbled original: ``math.loga`` does not exist (it stood for
    ``math.log2``), the result was bound to a throwaway ``snake_case_`` while
    the return read an unbound ``exponent``, and the comparison truncated the
    input instead of the exponent.
    """
    exponent = math.log2(math.sqrt(4 * __magic_name__ + 1) / 2 + 1 / 2)
    # Integral exponent <=> the closed-form value is an exact power of two.
    return exponent == int(exponent)
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
    """Return the first partition candidate at which the running proportion of
    "perfect" partitions drops below ``__magic_name__`` (the maximum proportion).

    Candidates are the integral values of ``(k**2 - 1) / 4`` for k >= 3 (only
    odd k contribute); a candidate is perfect when ``sqrt(4*c + 1)/2 + 1/2`` is
    an exact power of two.

    Fixes over the garbled original: every local was bound to a throwaway
    ``snake_case_`` while later lines read ``total_partitions`` etc., and the
    perfect-partition test called ``check_partition_perfect``, which is not
    defined under that name in this rewritten file (the helper is inlined here).
    """

    def _is_perfect(candidate: int) -> bool:
        # Inlined perfect-partition test (see the sibling checker above).
        exponent = math.log2(math.sqrt(4 * candidate + 1) / 2 + 1 / 2)
        return exponent == int(exponent)

    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # Only integral candidates count as partitions.
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if _is_perfect(partition_candidate):
                perfect_partitions += 1
        # The ratio can only change right after a new candidate was counted,
        # so the returned candidate is always the most recent (integral) one.
        if perfect_partitions > 0 and perfect_partitions / total_partitions < __magic_name__:
            return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
    # NOTE(review): ``solution`` is not defined under that name in this
    # rewritten file (the function above was renamed by an automated rewrite).
    print(f'''{solution() = }''')
| 653 | 1 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id", ["canonical_dataset_name", "org-name/dataset-name"])
@pytest.mark.parametrize("path", ["filename.csv", "filename with blanks.csv"])
@pytest.mark.parametrize("revision", [None, "v2"])
def __UpperCAmelCase ( repo_id ,path ,revision )-> Optional[int]:
    """Check ``hf_hub_url`` builds the expected dataset resolve URL.

    The path component must be percent-quoted and a missing revision must fall
    back to ``main``.

    Fixes over the garbled original: all three parameters were declared under a
    single generated name (a SyntaxError) and the built URL was bound to a
    throwaway local while the assert read an unbound ``url``.  The parameter
    names are taken from the ``parametrize`` ids.
    """
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path )}'''
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
    """Records the leaf modules a model actually executes during one forward pass.

    NOTE(review): mangled by an automated rewrite -- all three dataclass fields
    collide on ``a__``, the hook method declares duplicate parameters (a
    SyntaxError as written), and ``nn.Convad``/``nn.BatchNormad`` presumably
    stand for ``nn.Conv2d``/``nn.BatchNorm2d``.  Confirm against the original
    tracker utility before running.
    """
    a__ = 42
    a__ = field(default_factory=a_ )
    a__ = field(default_factory=a_ )
    def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
        '''Forward hook: remember modules that have no submodules (i.e. leaves).'''
        snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowerCAmelCase__ )
    def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
        '''Attach a hook to every submodule, run one forward pass, then detach all hooks.'''
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowerCAmelCase__ )
        # Remove all hooks once tracing is done.
        [x.remove() for x in self.handles]
        return self
    @property
    def _A ( self :int ) -> List[Any]:
        '''Traced leaf modules that actually hold parameters (non-empty state dict).'''
        return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
    """Copies weights from a source model to a destination model by aligning the
    sequence of parametrized operations both execute on the same input.

    NOTE(review): mangled by an automated rewrite -- all dataclass fields
    collide on ``a__`` (the originals are read below as ``self.src``,
    ``self.dest``, ``self.verbose``, ``self.src_skip`` and ``self.dest_skip``)
    and locals are bound to a throwaway ``snake_case_``.  Confirm against the
    original transfer utility before running.
    """
    a__ = 42
    a__ = 42
    a__ = 0
    a__ = field(default_factory=a_ )
    a__ = field(default_factory=a_ )
    def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
        '''Trace both models on the given input and load src weights into dest, op by op.'''
        snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
        snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
        # Drop the operation types the caller asked to skip on either side.
        snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
        snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
        if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
                F''' destination module has {len(lowerCAmelCase__ )}.''' )
        for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
    """Convert one timm ResNet checkpoint to HF, verify the logits match, and optionally push.

    NOTE(review): mangled by an automated rewrite -- all four parameters share
    one generated name (a SyntaxError as written; presumably ``name``,
    ``config``, ``save_directory`` and ``push_to_hub``) and locals are bound to
    a throwaway ``snake_case_`` while later lines read ``from_model``,
    ``our_model``, ``module_transfer`` and ``checkpoint_name``.  Confirm
    against the original conversion script before running.
    """
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        # Source timm model (pretrained) and fresh destination HF model.
        snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
        snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
        snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
        # Dummy ImageNet-sized input used to trace both models.
        snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
        module_transfer(__magic_name__ )
    assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
    snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
    print(__magic_name__ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
        # we can use the convnext one
        snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
        print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
    """Build per-architecture ResNet configs and convert/push each requested checkpoint.

    NOTE(review): mangled by an automated rewrite -- the parameters share one
    generated name (a SyntaxError as written; presumably ``save_directory``,
    ``model_name`` and ``push_to_hub``) and locals are bound to a throwaway
    ``snake_case_`` while later lines read ``num_labels``, ``idalabel`` and
    ``names_to_config``.  Confirm against the original conversion script.
    """
    snake_case_ : List[str] = "imagenet-1k-id2label.json"
    snake_case_ : Optional[Any] = 1000
    snake_case_ : List[Any] = (1, num_labels)
    snake_case_ : Optional[Any] = "huggingface/label-files"
    snake_case_ : Dict = num_labels
    # Load the ImageNet-1k id -> label mapping from the Hub.
    snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
    snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
    snake_case_ : Any = idalabel
    snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
    # Partially applied config constructor shared by every architecture below.
    snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
    snake_case_ : Optional[int] = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
    }
    if model_name:
        # Convert only the requested architecture.
        convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
    else:
        # No name given: convert every supported architecture.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point for the timm ResNet -> HF conversion.
    # NOTE(review): ``convert_weights_and_push`` is not defined under that name
    # in this rewritten file (the function above was renamed by an automated
    # rewrite) -- confirm against the original script.
    __lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    __lowerCamelCase : Tuple = parser.parse_args()
    __lowerCamelCase : Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ )-> bool:
    """Return True iff ``__magic_name__`` is an automorphic number.

    An automorphic number's square ends in the number itself
    (5 -> 25, 76 -> 5776, 0 -> 0).

    Raises:
        TypeError: if the input is not an ``int``.

    Fixes over the garbled original: the type check compared the value against
    itself (``isinstance(n, n)``), the error message was built and then
    discarded (the raw input was raised instead), and the square was bound to a
    throwaway ``snake_case_`` while the loop read an unbound ``number_square``.
    """
    if not isinstance(__magic_name__, int):
        msg = F'''Input value of [number={__magic_name__}] must be an integer'''
        raise TypeError(msg)
    if __magic_name__ < 0:
        return False
    number = __magic_name__
    number_square = number * number
    # Compare trailing digits of the number and its square one by one.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration class for RoCBert (``roc_bert``) models.

    Holds the usual BERT-style hyper-parameters plus RoCBert's extra
    pronunciation/shape embedding settings.

    Fixes over the garbled original: every ``__init__`` parameter was declared
    under a single generated name (a SyntaxError as written) and every value
    was discarded into a throwaway local.  The parameter names below are
    restored from the upstream ``RoCBertConfig`` (matched by defaults and
    assignment order) -- confirm against the Transformers API reference.
    """

    a__ = '''roc_bert'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-1_2,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        # Standard BERT-style hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific pronunciation/shape embedding settings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 653 | 1 |
'''simple docstring'''
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)

# Module-level logger; the test class below attaches a stdout handler to it.
logger = logging.getLogger()
def __UpperCAmelCase( ):
    """Parse the ``-f`` flag (injected by some test launchers) and return its value."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f" )
    args = parser.parse_args()
    return args.f
class A_ (TestCasePlus ):
    """End-to-end tests for the DeeBERT example: two-stage training followed by
    per-highway and entropy-based early-exit evaluation."""

    def setup(self) -> None:
        """Mirror script logging to stdout so failures are visible in test output."""
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args) -> None:
        """Run ``run_glue_deebert.main()`` with ``args`` and require every metric >= 0.666."""
        n_gpu = get_gpu_count()

        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, "run_glue_deebert.py")
            with patch.object(sys, "argv", args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.666)

    @slow
    @require_torch_non_multi_gpu
    def test_glue_deebert_train(self) -> None:
        """Train a two-stage DeeBERT model, then evaluate each highway and an entropy-based exit."""
        train_args = "\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n ".split()
        self.run_and_check(train_args)

        eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(eval_args)

        entropy_eval_args = "\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n ".split()
        self.run_and_check(entropy_eval_args)
| 653 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square in `mat`, by plain recursion.

    Exponential time; kept as the reference implementation for the DP versions below.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: outside the matrix contributes nothing.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list so the nested function can update the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized with a `dp_array` (-1 means "not computed")."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only the current and the next row (O(cols) space)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias!) — `next_row = current_row` would make `diagonal` read
        # values already overwritten for the current row, corrupting the DP.
        next_row = current_row[:]
    return largest_square_area


# Backward-compatible alias for the obfuscated name this module previously exported.
__UpperCAmelCase = largest_square_area_in_matrix_bottom_up_space_optimization

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
'''simple docstring'''
from PIL import Image
def __UpperCAmelCase(img, level: float) -> "Image":
    """Return a copy of the PIL image with its brightness shifted by `level`.

    Args:
        img: source PIL Image.
        level: brightness offset in [-255.0, 255.0]; negative darkens, positive lightens.

    Raises:
        ValueError: if `level` is outside [-255.0, 255.0].
    """

    def brightness(c: int) -> float:
        # Fundamental transformation: shift every channel value by `level`.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = __UpperCAmelCase(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Fetch the most recent scheduled daily-CI workflow runs from the GitHub API.

    Args:
        token: GitHub token, or None for unauthenticated requests.
        num_runs: how many runs to request (default 7).

    Returns:
        The list under the API response's "workflow_runs" key.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or None."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        # Runs are returned newest-first; the first completed one is the latest.
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run into `output_dir`."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE: `worflow_run_id` keyword matches the (misspelled) parameter of
        # `get_artifacts_links` in the companion module.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the last daily CI artifacts and return their text file contents.

    Returns:
        dict mapping artifact name -> {filename inside the zip -> decoded content}.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 653 | 1 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Side length of the largest all-ones square in `mat`, by plain recursion.

    Exponential time; kept as the reference implementation for the DP versions below.
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: outside the matrix contributes nothing.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        return 0

    # One-element list so the nested function can update the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion as above, memoized with a `dp_array` (-1 means "not computed")."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]


def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative bottom-up DP over a (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Bottom-up DP keeping only the current and the next row (O(cols) space)."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # Copy (not alias!) — `next_row = current_row` would make `diagonal` read
        # values already overwritten for the current row, corrupting the DP.
        next_row = current_row[:]
    return largest_square_area


# Backward-compatible alias for the obfuscated name this module previously exported.
__UpperCAmelCase = largest_square_area_in_matrix_bottom_up_space_optimization

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 |
'''simple docstring'''
from string import ascii_uppercase
# Forward and reverse lookup tables between the 26 uppercase letters and 0..25.
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Extend `key` by repeating its own characters until it matches `message` in length."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with a Vigenère-style subtractive cipher; spaces pass through."""
    encrypted = ""
    i = 0
    for letter in message:
        if letter == " ":
            encrypted += " "
        else:
            # Subtract the key letter (mod 26); the key index only advances on letters.
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            encrypted += dictb[x]
    return encrypted


def original_text(cipher_text: str, key_new: str) -> str:
    """Invert `cipher_text` by adding the key letters back (mod 26)."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    """Demo: encrypt and decrypt a fixed message with key 'SECRET'."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


# Backward-compatible alias for the obfuscated name this module previously exported.
__UpperCAmelCase = main

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 653 | 1 |
'''simple docstring'''
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
# Make the repo's `src/` importable so tests run against the working tree,
# not an installed transformers package.
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
# Tiny wav2vec2 checkpoints keep the deepspeed integration tests fast.
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def __UpperCAmelCase(func, param_num, param):
    """Build a readable parameterized-test name from the test function and its arguments."""
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class A_ (a_ ):
    """DeepSpeed (ZeRO-2 / ZeRO-3) integration tests for the wav2vec2 `run_asr.py` example.

    NOTE(review): this class is obfuscated beyond safe in-place repair — every
    method is named `_A` (later defs shadow earlier ones), the repeated
    `lowerCAmelCase__` parameter names are a SyntaxError, and the decorator
    arguments presumably should be the module-level params list and the custom
    name function.  Code left byte-identical; only comments/docstrings added.
    """
    # Expands over every (stage, model) combination produced at module level.
    @parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
    def _A ( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Tuple ) -> Optional[int]:
        """Run one (stage, model) training combination and check the result."""
        self.run_and_check(
            stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
    @require_torch_multi_gpu
    @parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
    def _A ( self :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :Any ) -> Any:
        """Multi-GPU variant of the run (gated on more than one GPU being present)."""
        self.run_and_check(
            stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
    @parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
    def _A ( self :str , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[Any] ) -> Tuple:
        """Single-GPU variant of the run."""
        self.run_and_check(
            stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
    @require_torch_multi_gpu
    @parameterized.expand(lowerCAmelCase__ , name_func=lowerCAmelCase__ )
    def _A ( self :List[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> Optional[int]:
        """Multi-GPU variant of the run."""
        self.run_and_check(
            stage=lowerCAmelCase__ , model=lowerCAmelCase__ , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
    def _A ( self :int , lowerCAmelCase__ :Dict ) -> Union[str, Any]:
        """Hook for extra assertions on a finished run's output directory (currently none)."""
        pass
    def _A ( self :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int = 10 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , ) -> Optional[int]:
        """Resolve the model checkpoint, launch the trainer and run the checks.

        NOTE(review): `models`, `model` and `output_dir` below presumably refer
        to de-obfuscated names — confirm before running.
        """
        snake_case_ : List[Any] = models[model]
        snake_case_ : Optional[Any] = self.run_trainer(
            stage=lowerCAmelCase__ , model_name=lowerCAmelCase__ , eval_steps=lowerCAmelCase__ , num_train_epochs=1 , distributed=lowerCAmelCase__ , fpaa=lowerCAmelCase__ , )
        self.do_checks(lowerCAmelCase__ )
        return output_dir
    def _A ( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int = 10 , lowerCAmelCase__ :int = 1 , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , ) -> Union[str, Any]:
        """Build the `run_asr.py` CLI, launch it under the deepspeed launcher, return the output dir."""
        snake_case_ : List[str] = self.get_auto_remove_tmp_dir("./xxx" , after=lowerCAmelCase__ )
        snake_case_ : int = F'''
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(lowerCAmelCase__ )}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        '''.split()
        if fpaa:
            args.extend(["--fp16"] )
        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        snake_case_ : Union[str, Any] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
        snake_case_ : Dict = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
        snake_case_ : Union[str, Any] = self.get_launcher(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(lowerCAmelCase__ , env=self.get_env() )
        return output_dir
    def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any]=False ) -> Any:
        """Return the deepspeed launcher command; 2 GPUs when distributed, else 1."""
        snake_case_ : Optional[Any] = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 653 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model, ckpt_dir, model_name):
    """Export a PyTorch BertModel state dict as a TensorFlow 1.x checkpoint.

    Args:
        model: the `BertModel` whose weights are exported.
        ckpt_dir: directory where the `.ckpt` files are written (created if missing).
        model_name: base name for the checkpoint ("-" is replaced with "_").
    """
    # PyTorch stores linear layers as (out, in); TF expects (in, out), so these get transposed.
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # Ordered textual rewrites from PyTorch parameter names to TF variable names.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name):
        # Apply the rewrites in order, then scope everything under "bert/".
        for patt, repl in var_map:
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Sanity check: the round-tripped TF value must match the PyTorch tensor.
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))
def main(raw_args=None):
    """CLI entry point: parse arguments, load the PyTorch BertModel, export a TF checkpoint."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
| 653 | 1 |
'''simple docstring'''
import numpy as np
def __UpperCAmelCase(f, ya, xa, h, x_end):
    """Integrate the ODE y' = f(x, y) with the classic 4th-order Runge-Kutta method.

    Args:
        f: right-hand side, called as f(x, y).
        ya: initial value y(xa).
        xa: initial x.
        h: step size.
        x_end: final x; the number of steps is ceil((x_end - xa) / h).

    Returns:
        numpy array of length n+1 with the approximated y values.
    """
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa

    for k in range(n):
        # Four slope evaluations per step: start, two midpoints, end.
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h

    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
    """Hash table variant whose buckets are `collections.deque`s (separate chaining),
    with new values pushed to the left of a bucket.

    NOTE(review): the base class `a_` is not defined in this file — presumably
    the obfuscated name of the imported `HashTable`; the method bodies also
    reference `key`/`data` while the parameters are named `lowerCAmelCase__`,
    and several assignment targets were lost to obfuscation.  Code left
    byte-identical; only comments/docstrings added.
    """
    def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
        """Forward all construction arguments to the base hash table."""
        super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
    def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
        """Insert a value at the head of the bucket stored under `key`.

        NOTE(review): `key` is undefined here (presumably the first parameter),
        and the first/last assignment targets were lost — confirm against the
        original before running.
        """
        snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(lowerCAmelCase__ )
        snake_case_ : Tuple = self.values[key]
    def _A ( self :int ) -> Dict:
        """Average remaining bucket capacity, scaled by the charge factor.

        `charge_factor` and `size_table` come from the base hash table.
        """
        return (
            sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
        """Collision resolution: keep `key` while its bucket still has room; otherwise
        defer to the base class resolution."""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
        ):
            return key
        return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
| 653 | 1 |
'''simple docstring'''
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase : Any = get_tests_dir('''fixtures/dummy-config.json''')
class A_ (unittest.TestCase ):
    """Unit tests for `AutoConfig`: loading by name/path, `for_model`, registration
    of custom configs, error messages, and `trust_remote_code` behaviour.

    NOTE(review): this class is obfuscated — every method is named `_A` (later
    defs shadow earlier ones) and the `lowerCAmelCase__` placeholders stand for
    values lost during obfuscation (expected config classes, fixture paths,
    registered classes, ...).  Code left byte-identical; only comments and
    docstrings added.
    """
    def _A ( self :List[str] ) -> Tuple:
        """Per-test setup: initialise a counter-like attribute (target name lost)."""
        snake_case_ : List[str] = 0
    def _A ( self :Tuple ) -> int:
        """The auto module must be importable and have a spec."""
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
    def _A ( self :str ) -> Optional[Any]:
        """Loading a hub checkpoint by name yields a config of the expected class."""
        snake_case_ : str = AutoConfig.from_pretrained("bert-base-uncased" )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :List[Any] ) -> Optional[Any]:
        """Loading from a local config file/identifier (placeholder lost to obfuscation)."""
        snake_case_ : Optional[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :List[Any] ) -> Tuple:
        """Loading from another identifier (placeholder lost to obfuscation)."""
        snake_case_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :Dict ) -> str:
        """`AutoConfig.for_model` maps a model type string to its config class."""
        snake_case_ : Tuple = AutoConfig.for_model("roberta" )
        self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
    def _A ( self :List[str] ) -> List[Any]:
        """Pattern matching on the folder name must pick the longest model-type match."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            snake_case_ : Union[str, Any] = os.path.join(lowerCAmelCase__ , "fake-roberta" )
            os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
            with open(os.path.join(lowerCAmelCase__ , "config.json" ) , "w" ) as f:
                f.write(json.dumps({} ) )
            snake_case_ : Any = AutoConfig.from_pretrained(lowerCAmelCase__ )
            self.assertEqual(type(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def _A ( self :Optional[Any] ) -> List[Any]:
        """Registering a custom config works, and conflicting registrations raise."""
        try:
            AutoConfig.register("custom" , lowerCAmelCase__ )
            # Wrong model type will raise an error
            with self.assertRaises(lowerCAmelCase__ ):
                AutoConfig.register("model" , lowerCAmelCase__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(lowerCAmelCase__ ):
                AutoConfig.register("bert" , lowerCAmelCase__ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            snake_case_ : str = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(lowerCAmelCase__ )
                snake_case_ : str = AutoConfig.from_pretrained(lowerCAmelCase__ )
                self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
    def _A ( self :List[str] ) -> Dict:
        """A non-existent identifier produces the documented error message."""
        with self.assertRaisesRegex(
            lowerCAmelCase__ , "bert-base is not a local folder and is not a valid model identifier" ):
            snake_case_ : Tuple = AutoConfig.from_pretrained("bert-base" )
    def _A ( self :int ) -> Union[str, Any]:
        """An invalid revision produces the documented error message."""
        with self.assertRaisesRegex(
            lowerCAmelCase__ , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            snake_case_ : Any = AutoConfig.from_pretrained(lowerCAmelCase__ , revision="aaaaaa" )
    def _A ( self :Any ) -> Any:
        """A repo without a config.json produces the documented error message."""
        with self.assertRaisesRegex(
            lowerCAmelCase__ , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
            snake_case_ : Dict = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
    def _A ( self :List[Any] ) -> Union[str, Any]:
        """Remote-code configs require `trust_remote_code=True`, and round-trip via save/load."""
        with self.assertRaises(lowerCAmelCase__ ):
            snake_case_ : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(lowerCAmelCase__ ):
            snake_case_ : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
        snake_case_ : List[str] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
        self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(lowerCAmelCase__ )
            snake_case_ : List[Any] = AutoConfig.from_pretrained(lowerCAmelCase__ , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        """A locally registered config takes precedence over remote code unless trust is enabled."""
        class A_ (a_ ):
            """Locally-defined config registered under model type 'new-model'."""
            a__ = '''new-model'''
        try:
            AutoConfig.register("new-model" , lowerCAmelCase__ )
            # If remote code is not set, the default is to use local
            snake_case_ : Tuple = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
            self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
            # If remote code is disabled, we load the local one.
            snake_case_ : str = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
            # If remote is enabled, we load from the Hub
            snake_case_ : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=lowerCAmelCase__ )
            self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
"""simple docstring"""
a__ = 42
a__ = 42
class A_ (_Item ):
"""simple docstring"""
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __bool__( self :Optional[int] ) -> bool:
'''simple docstring'''
return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 653 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__lowerCamelCase : Dict = logging.get_logger(__name__)
class A_ (a_ ):
    """Deprecated alias kept for backward compatibility; emits a FutureWarning
    and otherwise behaves exactly like the image-processor base class."""

    # Fix: the original declared *args and **kwargs with the same name
    # (a SyntaxError) and passed an undefined name as the warning category.
    def __init__(self, *args: Optional[Any], **kwargs: Optional[int]) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead." , FutureWarning , )
        super().__init__(*args, **kwargs)
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : str = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
'''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration class storing the hyperparameters of a GPT-BigCode model.

    Fix: the original assigned all four class attributes to the same name
    `a__` (each overwriting the previous) and declared every `__init__`
    parameter with one shared name, which is a SyntaxError. Distinct names
    are restored from the attribute assignments in the body.
    """

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: Any = 50_257,
        n_positions: Any = 1_024,
        n_embd: Any = 768,
        n_layer: Any = 12,
        n_head: Any = 12,
        n_inner: Any = None,
        activation_function: Any = "gelu_pytorch_tanh",
        resid_pdrop: Any = 0.1,
        embd_pdrop: Any = 0.1,
        attn_pdrop: Any = 0.1,
        layer_norm_epsilon: Any = 1E-5,
        initializer_range: Any = 0.0_2,
        scale_attn_weights: Any = True,
        use_cache: Any = True,
        bos_token_id: Any = 50_256,
        eos_token_id: Any = 50_256,
        attention_softmax_in_fp32: Any = True,
        scale_attention_softmax_in_fp32: Any = True,
        multi_query: Any = True,
        **kwargs: Any,
    ) -> Any:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # Special-token ids are forwarded so the base config stores them too.
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
| 653 | 1 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
"""Records the leaf modules actually executed during one forward pass by attaching temporary forward hooks."""
# NOTE(review): both `field(default_factory=a_)` entries share the name `a__`
# (they overwrite each other) and `a_` is undefined here; the hook reads
# `m` / `has_not_submodules` which are never bound under those names, and
# `nn.Convad` / `nn.BatchNormad` look like mangled Conv2d / BatchNorm2d —
# verify against the upstream conversion script.
a__ = 42
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
'''Forward hook: keep only modules with no submodules (true leaves).'''
snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
'''Run one forward pass with hooks attached, then detach every hook.'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def _A ( self :int ) -> List[Any]:
'''Traced modules that carry parameters (non-empty state dict).'''
return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
"""Copies weights from a source module to a destination module by tracing both with the same input and matching their parametrized leaf operations one-to-one."""
# NOTE(review): the five `a__` class attributes overwrite each other and
# `a_` is undefined — upstream these are src, dest, verbose, src_skip,
# dest_skip; verify before relying on this dataclass.
a__ = 42
a__ = 42
a__ = 0
a__ = field(default_factory=a_ )
a__ = field(default_factory=a_ )
def __call__( self :Tuple , lowerCAmelCase__ :Tensor ) -> Tuple:
'''Trace src and dest with input `x`, filter skipped op types, then copy state dicts pairwise; raises if the op counts differ.'''
snake_case_ : List[Any] = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
snake_case_ : Tuple = Tracker(self.src )(lowerCAmelCase__ ).parametrized
snake_case_ : List[str] = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
snake_case_ : Tuple = list(filter(lambda lowerCAmelCase__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise Exception(
F'''Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while'''
F''' destination module has {len(lowerCAmelCase__ )}.''' )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F'''Transfered from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
"""Convert one timm ResNet checkpoint into a HF ResNetForImageClassification, verify the logits match, and optionally push model + image processor to the Hub."""
# NOTE(review): `name`, `from_model`, `our_model`, `save_directory` and
# `checkpoint_name` are read below but never bound under those names in
# this function — bindings were mangled; verify against the upstream
# convert_resnet_*.py script before running.
print(F'''Converting {name}...''' )
with torch.no_grad():
snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
module_transfer(__magic_name__ )
# Sanity check: converted model must reproduce the original logits exactly.
assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
print(__magic_name__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
# we can use the convnext one
snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
"""Build ImageNet-1k ResNet configs (resnet18..152) and convert either one named checkpoint or all of them."""
# NOTE(review): `num_labels`, `idalabel`, `model_name` and `names_to_config`
# are read below but bound under other names — verify against upstream.
snake_case_ : List[str] = "imagenet-1k-id2label.json"
snake_case_ : Optional[Any] = 1000
snake_case_ : List[Any] = (1, num_labels)
snake_case_ : Optional[Any] = "huggingface/label-files"
snake_case_ : Dict = num_labels
# id2label mapping is fetched from the shared label-files dataset repo.
snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
snake_case_ : Any = idalabel
snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
snake_case_ : Optional[int] = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
return config, expected_shape
if __name__ == "__main__":
__lowerCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
__lowerCamelCase : Tuple = parser.parse_args()
__lowerCamelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 |
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
__lowerCamelCase : Union[str, Any] = logging.getLogger(__name__)
def __UpperCAmelCase ( __magic_name__ )-> str:
    """Dump the current git repo id, commit sha and branch to <folder>/git_log.json.

    Fix: the repo object was assigned to one name but read as `repo`
    (NameError), and the output folder was wrongly passed as
    `search_parent_directories` instead of being used only for the JSON path.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
    }
    with open(os.path.join(__magic_name__ ,"git_log.json" ) ,"w" ) as f:
        json.dump(repo_infos ,f ,indent=4 )
def __UpperCAmelCase ( __magic_name__ )-> Tuple:
"""Initialize single/multi-GPU and (optionally) multi-node distributed state on the params namespace, then select the CUDA device and init the NCCL process group."""
# NOTE(review): every value is assigned to `snake_case_` but read back as
# `params.<attr>` — the attribute bindings were mangled; verify against the
# upstream distillation utils before running.
if params.n_gpu <= 0:
snake_case_ : Any = 0
snake_case_ : Any = -1
snake_case_ : Tuple = True
snake_case_ : List[str] = False
return
assert torch.cuda.is_available()
logger.info("Initializing GPUs" )
if params.n_gpu > 1:
# Multi-GPU job: topology comes from the launcher's environment variables.
assert params.local_rank != -1
snake_case_ : Optional[int] = int(os.environ["WORLD_SIZE"] )
snake_case_ : int = int(os.environ["N_GPU_NODE"] )
snake_case_ : Any = int(os.environ["RANK"] )
# number of nodes / node ID
snake_case_ : Dict = params.world_size // params.n_gpu_per_node
snake_case_ : Optional[int] = params.global_rank // params.n_gpu_per_node
snake_case_ : Tuple = True
assert params.n_nodes == int(os.environ["N_NODES"] )
assert params.node_id == int(os.environ["NODE_RANK"] )
# local job (single GPU)
else:
assert params.local_rank == -1
snake_case_ : Optional[int] = 1
snake_case_ : str = 0
snake_case_ : List[Any] = 0
snake_case_ : int = 0
snake_case_ : Dict = 1
snake_case_ : Optional[Any] = 1
snake_case_ : str = False
# sanity checks
assert params.n_nodes >= 1
assert 0 <= params.node_id < params.n_nodes
assert 0 <= params.local_rank <= params.global_rank < params.world_size
assert params.world_size == params.n_nodes * params.n_gpu_per_node
# define whether this is the master process / if we are in multi-node distributed mode
snake_case_ : str = params.node_id == 0 and params.local_rank == 0
snake_case_ : str = params.n_nodes > 1
# summary
snake_case_ : str = F'''--- Global rank: {params.global_rank} - '''
logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes )
logger.info(PREFIX + "Node ID : %i" % params.node_id )
logger.info(PREFIX + "Local rank : %i" % params.local_rank )
logger.info(PREFIX + "World size : %i" % params.world_size )
logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node )
logger.info(PREFIX + "Master : %s" % str(params.is_master ) )
logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node ) )
logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu ) )
logger.info(PREFIX + "Hostname : %s" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("Initializing PyTorch distributed" )
torch.distributed.init_process_group(
init_method="env://" ,backend="nccl" ,)
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
np.random.seed(args.seed )
torch.manual_seed(args.seed )
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed )
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import structure for the LayoutLMv3 package: heavyweight submodules are
# only imported on first attribute access (see _LazyModule at the bottom).
# NOTE(review): the _import_structure keys say "layoutlmv3" while the
# TYPE_CHECKING imports reference "*_layoutlmva" modules/classes — the module
# names appear mangled; verify they resolve against the real package layout.
__lowerCamelCase : Dict = {
'''configuration_layoutlmv3''': [
'''LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LayoutLMv3Config''',
'''LayoutLMv3OnnxConfig''',
],
'''processing_layoutlmv3''': ['''LayoutLMv3Processor'''],
'''tokenization_layoutlmv3''': ['''LayoutLMv3Tokenizer'''],
}
# Each optional backend (tokenizers / torch / tf / vision) contributes extra
# entries only when that dependency is installed.
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = ['''LayoutLMv3TokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
'''LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv3ForQuestionAnswering''',
'''LayoutLMv3ForSequenceClassification''',
'''LayoutLMv3ForTokenClassification''',
'''LayoutLMv3Model''',
'''LayoutLMv3PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
'''TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLayoutLMv3ForQuestionAnswering''',
'''TFLayoutLMv3ForSequenceClassification''',
'''TFLayoutLMv3ForTokenClassification''',
'''TFLayoutLMv3Model''',
'''TFLayoutLMv3PreTrainedModel''',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = ['''LayoutLMv3FeatureExtractor''']
__lowerCamelCase : Union[str, Any] = ['''LayoutLMv3ImageProcessor''']
# Static type checkers see the real imports; at runtime the lazy module is used.
if TYPE_CHECKING:
from .configuration_layoutlmva import (
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
LayoutLMvaConfig,
LayoutLMvaOnnxConfig,
)
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_layoutlmva import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
TFLayoutLMvaPreTrainedModel,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
import sys
# Replace this module with a lazy proxy that imports submodules on demand.
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class A_ (unittest.TestCase ):
"""Fixture helper that holds YOLOS image-processor settings and computes the resize shapes the processor is expected to produce."""
# NOTE(review): both __init__ and the helper below declare several
# parameters with one shared name `lowerCAmelCase__` (a SyntaxError), and
# values are assigned to `snake_case_` but read as their original names
# (`w`, `h`, `image`, `expected_values`) — verify against upstream.
def __init__( self :Any , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Dict=7 , lowerCAmelCase__ :Union[str, Any]=3 , lowerCAmelCase__ :List[str]=30 , lowerCAmelCase__ :List[str]=400 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=True , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :Optional[int]=[0.5, 0.5, 0.5] , lowerCAmelCase__ :str=True , lowerCAmelCase__ :int=1 / 255 , lowerCAmelCase__ :int=True , ) -> str:
'''Store every image-processing knob used by the tests.'''
snake_case_ : List[Any] = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
snake_case_ : Dict = parent
snake_case_ : Union[str, Any] = batch_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = min_resolution
snake_case_ : Dict = max_resolution
snake_case_ : Optional[Any] = do_resize
snake_case_ : str = size
snake_case_ : Optional[int] = do_normalize
snake_case_ : Dict = image_mean
snake_case_ : Optional[int] = image_std
snake_case_ : List[str] = do_rescale
snake_case_ : Dict = rescale_factor
snake_case_ : str = do_pad
def _A ( self :List[Any] ) -> Dict:
'''Return the kwargs dict used to instantiate the image processor.'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _A ( self :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=False ) -> str:
'''Compute the (height, width) the processor should resize inputs to; in batched mode, the max over per-image expectations.'''
if not batched:
snake_case_ : List[str] = image_inputs[0]
if isinstance(lowerCAmelCase__ , Image.Image ):
snake_case_, snake_case_ : int = image.size
else:
snake_case_, snake_case_ : Any = image.shape[1], image.shape[2]
# Shortest-edge resize: scale so the smaller side hits shortest_edge.
if w < h:
snake_case_ : int = int(self.size["shortest_edge"] * h / w )
snake_case_ : List[Any] = self.size["shortest_edge"]
elif w > h:
snake_case_ : Optional[int] = self.size["shortest_edge"]
snake_case_ : str = int(self.size["shortest_edge"] * w / h )
else:
snake_case_ : Tuple = self.size["shortest_edge"]
snake_case_ : Dict = self.size["shortest_edge"]
else:
snake_case_ : List[str] = []
for image in image_inputs:
snake_case_, snake_case_ : Any = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case_ : str = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0]
snake_case_ : int = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ (a_ , unittest.TestCase ):
"""Tests for YolosImageProcessor: property presence, dict round-trip, and encoding of PIL/numpy/torch inputs."""
a__ = YolosImageProcessor if is_vision_available() else None
def _A ( self :Optional[Any] ) -> str:
'''Create the shared tester fixture.'''
snake_case_ : int = YolosImageProcessingTester(self )
@property
def _A ( self :List[str] ) -> Any:
'''Kwargs for instantiating the processor under test.'''
return self.image_processor_tester.prepare_image_processor_dict()
def _A ( self :List[str] ) -> Union[str, Any]:
'''The processor must expose all standard preprocessing attributes.'''
snake_case_ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase__ , "size" ) )
def _A ( self :List[Any] ) -> Any:
'''from_dict must honor defaults and explicit size/pad overrides.'''
snake_case_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
snake_case_ : Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase__ )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase__ )
def _A ( self :List[str] ) -> int:
'''Intentionally empty placeholder test.'''
pass
def _A ( self :Optional[Any] ) -> Optional[Any]:
'''Encoding PIL images (single and batched) yields tensors of the expected resize shape.'''
snake_case_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , Image.Image )
# Test not batched input
snake_case_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : int = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
snake_case_ : Any = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Dict ) -> Dict:
'''Same shape checks for numpy-array inputs.'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : Tuple = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Dict = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Tuple:
'''Same shape checks for torch-tensor inputs.'''
snake_case_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test not batched input
snake_case_ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case_ : List[Any] = image_processing(lowerCAmelCase__ , return_tensors="pt" ).pixel_values
snake_case_, snake_case_ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _A ( self :Tuple ) -> Dict:
'''Calling the processor and calling pad() directly must produce identical pixel_values.'''
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
snake_case_ : List[Any] = self.image_processing_class(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ , do_rescale=lowerCAmelCase__ )
# create random PyTorch tensors
snake_case_ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase__ , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
snake_case_ : Tuple = image_processing_a.pad(lowerCAmelCase__ , return_tensors="pt" )
snake_case_ : Union[str, Any] = image_processing_a(lowerCAmelCase__ , return_tensors="pt" )
self.assertTrue(
torch.allclose(encoded_images_with_method["pixel_values"] , encoded_images["pixel_values"] , atol=1E-4 ) )
@slow
def _A ( self :str ) -> Any:
'''Integration test: encode a COCO detection sample and verify pixel values and every annotation field against golden tensors.'''
snake_case_ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case_ : int = json.loads(f.read() )
snake_case_ : Optional[int] = {"image_id": 39_769, "annotations": target}
# encode them
snake_case_ : Tuple = YolosImageProcessor.from_pretrained("hustvl/yolos-small" )
snake_case_ : Dict = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : str = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : Dict = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : Any = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : Dict = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : int = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : List[str] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify orig_size
snake_case_ : Any = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : List[Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
@slow
def _A ( self :Dict ) -> int:
'''simple docstring'''
snake_case_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case_ : Optional[int] = json.loads(f.read() )
snake_case_ : Tuple = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
snake_case_ : Any = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case_ : int = YolosImageProcessor(format="coco_panoptic" )
snake_case_ : Union[str, Any] = image_processing(images=lowerCAmelCase__ , annotations=lowerCAmelCase__ , masks_path=lowerCAmelCase__ , return_tensors="pt" )
# verify pixel values
snake_case_ : Optional[int] = torch.Size([1, 3, 800, 1_066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
# verify area
snake_case_ : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase__ ) )
# verify boxes
snake_case_ : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase__ )
snake_case_ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase__ , atol=1E-3 ) )
# verify image_id
snake_case_ : List[str] = torch.tensor([39_769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase__ ) )
# verify is_crowd
snake_case_ : Dict = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase__ ) )
# verify class_labels
snake_case_ : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase__ ) )
# verify masks
snake_case_ : Any = 822_873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase__ )
# verify orig_size
snake_case_ : int = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase__ ) )
# verify size
snake_case_ : Union[str, Any] = torch.tensor([800, 1_066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase__ ) )
| 653 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class A_ (unittest.TestCase ):
    """Builds a tiny RoFormer config plus random inputs for the Flax model tests.

    Fixes: the ``__init__`` signature declared every parameter under one
    repeated mangled name (a ``SyntaxError``), attribute assignments went to
    throwaway locals instead of ``self``, and both helper methods were named
    ``_A`` — so the in-class call ``self.prepare_config_and_inputs()`` could
    never resolve. Parameter names are restored from the attribute list.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_choices=4,
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, token_type_ids, attention_mask)`` with random tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): restored literal — these tests exercise encoder-only
            # behaviour; confirm False matches the upstream test file.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape the common test mixin expects."""
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class A_ (FlaxModelTesterMixin , unittest.TestCase ):
    """Runs the shared Flax model-tester suite against the RoFormer heads.

    Fixes: the base class ``a_`` was an undefined name (restored to the
    imported ``FlaxModelTesterMixin``), the two class attributes were both
    named ``a__`` (the second overwrote the first), and both methods were
    named ``_A`` so unittest never discovered them.
    """

    # NOTE(review): attribute name restored by convention of the common mixin
    # (the original mangled name was unreadable) — confirm against upstream.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self) -> None:
        # NOTE(review): FlaxRoFormerModelTester is expected to be the helper
        # class defined above in this file — confirm its (mangled) name.
        self.model_tester = FlaxRoFormerModelTester(self )

    @slow
    def test_model_from_pretrained(self) -> None:
        """Smoke test: every RoFormer head loads from the PyTorch checkpoint and runs."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class A_ (unittest.TestCase ):
    """Integration test: FlaxRoFormerForMaskedLM logits match reference values.

    Fix: the body referenced the undefined name ``lowerCAmelCase__`` and the
    method name ``_A`` was not discoverable by unittest.
    """

    @slow
    def test_inference_masked_lm(self) -> None:
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # The checkpoint has a 50k-token vocabulary; logits are (batch, seq, vocab).
        vocab_size = 50_000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )
        # Reference logits recorded from the original PyTorch implementation.
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 653 |
'''simple docstring'''
def __UpperCAmelCase(number, iterations) -> str:
    """Play FizzBuzz, starting at ``number`` and counting up through ``iterations``.

    Returns the space-separated transcript (every token carries a trailing
    space):

    >>> __UpperCAmelCase(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '

    Raises:
        ValueError: if ``iterations`` is not an int, if ``number`` is not a
            positive int, or if ``iterations`` < 1.
    """
    # Fix: both parameters were previously declared under the same mangled name
    # (a SyntaxError) and each isinstance() check compared a value to itself;
    # validate each argument against int explicitly.
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError(
            "starting number must be\n and integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        # Plain numbers are emitted only when neither 3 nor 5 divides them.
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class A_ (unittest.TestCase ):
    """Unit test for ``tf_top_k_top_p_filtering``.

    Fixes: ``tf.floataa``/``tf.intaa`` do not exist (restored to ``tf.float32``
    / ``tf.int32``) and all locals were referenced through the undefined name
    ``lowerCAmelCase__``. The numeric literals (PEP 515 underscore style) are
    kept byte-for-byte from the original.
    """

    def test_top_k_top_p_filtering(self) -> None:
        logits = tf.convert_to_tensor(
            [
                [
                    8.2_2_2_0_9_9_1,  # 3rd highest value; idx. 0
                    -0.5_6_2_0_0_4_4,
                    5.2_3_2_2_9_7_5_2,
                    4.0_3_8_6_3_9_3,
                    -6.8_7_9_8_3_7_8,
                    -0.5_4_7_8_5_8_0_2,
                    -3.2_0_1_2_1_5_3,
                    2.9_2_7_7_7_1_7_6,
                    1.8_8_1_7_1_9_5_3,
                    7.3_5_3_4_1_2_7_6,  # 5th highest value; idx. 9
                    8.4_3_2_0_7_8_3_3,  # 2nd highest value; idx. 10
                    -9.8_5_7_1_1_8_3_6,
                    -5.9_6_2_0_9_2_3_6,
                    -1.1_3_0_3_9_1_6_1,
                    -7.1_1_1_5_2_9_4,
                    -0.8_3_6_9_6_3_3,
                    -5.3_1_8_6_4_0_8,
                    7.0_6_4_2_7_4_0_7,
                    0.8_1_3_6_9_3_4_4,
                    -0.8_2_0_2_3_8_1_7,
                    -5.9_1_7_9_7_9_6,
                    0.5_8_8_1_3_4_4_3,
                    -6.9_9_7_7_8_4_3_8,
                    4.7_1_5_5_1_1_8_9,
                    -0.1_8_7_7_1_6_3_7,
                    7.4_4_0_2_0_7_5_9,  # 4th highest value; idx. 25
                    9.3_8_4_5_0_9_8_7,  # 1st highest value; idx. 26
                    2.1_2_6_6_2_9_4_1,
                    -9.3_2_5_6_2_0_3_8,
                    2.3_5_6_5_2_5_2_2,
                ],  # cummulative prob of 5 highest values <= 0.6
                [
                    0.5_8_4_2_5_5_1_8,
                    4.5_3_1_3_9_2_3_8,
                    -5.5_7_5_1_0_4_6_4,
                    -6.2_8_0_3_0_6_9_9,
                    -7.1_9_5_2_9_5_0_3,
                    -4.0_2_1_2_2_5_5_1,
                    1.3_9_3_3_7_0_3_7,
                    -6.0_6_7_0_7_0_5_7,
                    1.5_9_4_8_0_5_1_7,
                    -9.6_4_3_1_1_9,
                    0.0_3_9_0_7_7_9_9,
                    0.6_7_2_3_1_7_6_2,
                    -8.8_8_2_0_6_7_2_6,
                    6.2_7_1_1_5_9_2_2,  # 4th highest value; idx. 13
                    2.2_8_5_2_0_7_2_3,
                    4.8_2_7_6_7_5_0_6,
                    4.3_0_4_2_1_3_6_8,
                    8.8_2_7_5_3_1_3,  # 2nd highest value; idx. 17
                    5.4_4_0_2_9_9_5_8,  # 5th highest value; idx. 18
                    -4.4_7_3_5_7_9_4,
                    7.3_8_5_7_9_5_3_6,  # 3rd highest value; idx. 20
                    -2.9_1_0_5_1_6_6_3,
                    2.6_1_9_4_6_0_7_7,
                    -2.5_6_7_4_7_6_2,
                    -9.4_8_9_5_9_3_0_2,
                    -4.0_2_9_2_2_6_4_5,
                    -1.3_5_4_1_6_9_1_8,
                    9.6_7_7_0_2_3_2_3,  # 1st highest value; idx. 27
                    -5.8_9_4_7_8_5_5_3,
                    1.8_5_3_7_0_4_6_7,
                ],  # cummulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )

        # expected non filtered idx as noted above
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )

        # expected non filtered values as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3],
            dtype=tf.float32,
        )

        output = tf_top_k_top_p_filtering(logits , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )
        non_inf_output = output[output != -float("inf" )]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output , tf.constant(-float("inf" ) , dtype=tf.float32 ) ) ),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output , non_inf_expected_output , rtol=1e-12 )
        tf.debugging.assert_equal(non_inf_idx , non_inf_expected_idx )
@require_tf
class A_ (unittest.TestCase , GenerationIntegrationTestsMixin ):
    """TF-specific generation integration tests (SavedModel export, eos handling, kwarg filtering).

    Fixes: the mixin base ``a_`` was undefined (restored to the imported
    ``GenerationIntegrationTestsMixin``), all five test methods were named
    ``_A`` (each shadowed the previous, and unittest discovered none), nested
    helper classes were defined under throwaway names although the bodies
    reference ``DummyModel`` / ``CompleteSentenceTransformer`` / ``FakeBart`` /
    ``FakeEncoder``, and ``tf.intaa`` does not exist (restored ``tf.int32``).
    """

    if is_tf_available():
        # Plumbing the framework-agnostic mixin needs to build TF objects.
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def test_generate_tf_function_export_fixed_input_length(self) -> None:
        """SavedModel export with a fixed input length must match eager generate()."""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ) -> None:
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((None, input_length) , tf.int32 , name="attention_mask" ),
                ),
                jit_compile=True,
            )
            def serving(self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for batch_size in range(1 , len(dummy_input_ids ) + 1 ):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size] ),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )

    @slow
    def test_generate_tf_function_export_fixed_batch_size(self) -> None:
        """SavedModel export with a fixed batch size must match eager generate()."""
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module ):
            def __init__(self , model ) -> None:
                super(DummyModel , self ).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="input_ids" ),
                    tf.TensorSpec((batch_size, None) , tf.int32 , name="attention_mask" ),
                ),
                jit_compile=True,
            )
            def serving(self , input_ids , attention_mask ):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model , tmp_dir , signatures={"serving_default": dummy_model.serving} )
            serving_func = tf.saved_model.load(tmp_dir ).signatures["serving_default"]
            for input_row in range(len(dummy_input_ids ) ):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]] ),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]] ),
                }
                tf_func_outputs = serving_func(**inputs )["sequences"]
                tf_model_outputs = test_model.generate(**inputs , max_new_tokens=max_new_tokens )
                tf.debugging.assert_equal(tf_func_outputs , tf_model_outputs )

    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self) -> None:
        """An end-to-end Keras model embedding a TF tokenizer must be saveable."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small" , filename="spiece.model" , local_dir=tmp_dir )

            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
                def __init__(self ) -> None:
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir , "spiece.model" ) , "rb" ).read() )
                    self.model = TFAutoModelForSeqaSeqLM.from_pretrained("hf-internal-testing/tiny-random-t5" )

                def call(self , inputs , *args , **kwargs ):
                    tokens = self.tokenizer.tokenize(inputs )
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    outputs = self.model.generate(input_ids=input_ids , attention_mask=attention_mask )
                    return self.tokenizer.detokenize(outputs )

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="inputs" )
            outputs = complete_model(inputs )
            keras_model = tf.keras.Model(inputs , outputs )
            keras_model.save(tmp_dir )

    def test_eos_token_id_int_and_list_top_k_top_sampling(self) -> None:
        """With a fixed seed, an int eos_token_id and the equivalent list stop at the same length."""
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
        sentence = "Hello, my dog is cute and"
        tokens = tokenizer(sentence , return_tensors="tf" )
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" )

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0" ):
            tf.random.set_seed(0 )
            generated_tokens = model.generate(**tokens , eos_token_id=eos_token_id , **generation_kwargs )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

    def test_model_kwarg_encoder_signature_filtering(self) -> None:
        """generate() must filter model kwargs against the encoder signature."""
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart" )
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article , return_tensors="tf" ).input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart" )
        output = bart_model.generate(input_ids ).numpy()

        # A model whose call() accepts an extra kwarg: the unused "foo" must be
        # filtered out and generation must be unchanged.
        class FakeBart(TFBartForConditionalGeneration ):
            def call(self , input_ids , attention_mask=None , **kwargs ):
                return super().call(input_ids , **kwargs )

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart" )
        fake_output = bart_model.generate(input_ids , foo="bar" ).numpy()
        self.assertTrue(np.array_equal(output , fake_output ) )

        class FakeEncoder(bart_model.model.encoder.__class__ ):
            def call(self , input_ids , **kwargs ):
                return super().call(input_ids , **kwargs )

        fake_encoder = FakeEncoder(bart_model.config , bart_model.model.shared )
        bart_model.model.encoder = fake_encoder
        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids ).numpy()
        with self.assertRaises(ValueError ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids , foo="bar" )
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def get_dataloaders(accelerator, batch_size=16):
    """Create the train/validation ``DataLoader``s for GLUE MRPC.

    Fix: the function was defined under a mangled name with two identical
    parameter names (a ``SyntaxError``); it is restored to ``get_dataloaders``,
    which is the name the training loop below actually calls (and the mocked
    override reassigns).

    Args:
        accelerator: The ``Accelerator`` whose device/precision settings drive padding.
        batch_size (int, optional): Batch size for both dataloaders.

    Returns:
        tuple: ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" ,"mrpc" )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=True ,max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function ,batched=True ,remove_columns=["idx", "sentence1", "sentence2"] ,)

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" ,"labels" )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples ,padding="longest" ,max_length=max_length ,pad_to_multiple_of=pad_to_multiple_of ,return_tensors="pt" ,)

    # Instantiate dataloaders. Only the training split is shuffled.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] ,shuffle=False ,collate_fn=collate_fn ,batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # Fix: the override previously assigned the mocked factory to a throwaway
    # name; it must shadow the real dataloader factory (hence the F811 hint).
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Run MRPC fine-tuning under ``find_executable_batch_size``.

    Fix: the function was defined under a mangled name with duplicate
    parameter names; it is restored to ``training_function``, the name the
    ``main()`` entry point below calls, and all locals referenced through the
    undefined ``lowerCAmelCase__`` are given real names.

    Args:
        config (dict): Hyper-parameters (``lr``, ``num_epochs``, ``seed``, ``batch_size``).
        args: Parsed CLI arguments (``cpu``, ``mixed_precision``).
    """
    if os.environ.get("TESTING_MOCKED_DATALOADERS" ,None ) == "1":
        # Keep CI runs short when the mocked dataloaders are active.
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu ,mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"] )
    seed = int(config["seed"] )
    batch_size = int(config["batch_size"] )
    metric = evaluate.load("glue" ,"mrpc" )

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size )
    def inner_training_loop(batch_size ):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()
        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() ,lr=lr )
        train_dataloader, eval_dataloader = get_dataloaders(accelerator ,batch_size )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=100 ,num_training_steps=(len(train_dataloader ) * num_epochs) ,)
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                accelerator.backward(loss )
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]) )
                metric.add_batch(
                    predictions=predictions ,references=references ,)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F'''epoch {epoch}:''' ,eval_metric )

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse CLI arguments and launch training.

    Fix: the entry point was defined under a mangled name although the
    ``__main__`` guard calls ``main()``, and ``type=``/``default=`` for
    ``--mixed_precision`` were undefined names (restored to ``str``/``None``).
    """
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" ,type=str ,default=None ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." ,)
    parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config ,args )


if __name__ == "__main__":
    main()
| 653 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : str = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class A_ (PretrainedConfig ):
    """EfficientNet model configuration (defaults follow ``google/efficientnet-b7``).

    Fixes: the base class ``a_`` was undefined (restored to the imported
    ``PretrainedConfig``), every ``__init__`` parameter was declared under one
    repeated mangled name (a ``SyntaxError``), and attribute assignments went
    to throwaway locals instead of ``self``. Parameter names are restored from
    the attribute list, in the same order as the visible defaults.
    """

    # Identifier used by AutoConfig to dispatch to this class.
    model_type = '''efficientnet'''

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.2_5,
        hidden_act: str = "swish",
        hidden_dim: int = 2_560,
        pooling_type: str = "mean",
        initializer_range: float = 0.0_2,
        batch_norm_eps: float = 0.0_0_1,
        batch_norm_momentum: float = 0.9_9,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs )
        # NOTE(review): list defaults are shared mutable objects; they are never
        # mutated here, but consider None-sentinels if that ever changes.
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        # Each repeated block expands to 4 hidden layers.
        self.num_hidden_layers = sum(num_block_repeats ) * 4
class A_ (OnnxConfig ):
    """ONNX export configuration for EfficientNet.

    Fixes: the base class ``a_`` was undefined (restored to the imported
    ``OnnxConfig``), both properties were named ``_A`` (the second shadowed
    the first), and the minimum-version attribute carried a mangled name, so
    the ONNX exporter could read none of them.
    """

    # Oldest torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse('''1.11''' )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axes mapping for each model input."""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        """Absolute tolerance used when validating the exported ONNX model."""
        return 1E-5
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
    """Tool that zero-shot-classifies an English text against caller-provided labels.

    Wraps the ``facebook/bart-large-mnli`` NLI model: each candidate label is
    turned into the hypothesis "This example is {label}" and the label whose
    logit (column 2 of the NLI head, the entailment class for bart-mnli) is
    highest is returned.

    FIX(review): the three lifecycle methods were all named ``_A`` (later
    definitions silently overwrote earlier ones) and ``encode`` declared two
    parameters with the same name — a SyntaxError. Restored the conventional
    ``setup``/``encode``/``decode`` names the pipeline-tool base class invokes,
    and rebound the mangled locals (``config``, ``logits``) that the bodies
    read but never defined.
    NOTE(review): the repeated ``a__`` class attributes below overwrite each
    other; they look like mangled names (checkpoint, description, name,
    pre-processor class, model class, inputs, outputs) — confirm against the
    base class before renaming them.
    """

    a__ = '''facebook/bart-large-mnli'''
    a__ = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    a__ = '''text_classifier'''
    a__ = AutoTokenizer
    a__ = AutoModelForSequenceClassification
    a__ = ['''text''', ['''text''']]
    a__ = ['''text''']

    def setup(self):
        """Resolve the entailment class id from the model config; fail fast if absent."""
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail" ):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init." )

    def encode(self, text, labels):
        """Tokenize one (premise, hypothesis) pair per candidate label.

        Stores ``labels`` so ``decode`` can map the winning row back to a label.
        """
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels) , [f"This example is {label}" for label in labels] , return_tensors="pt" , padding="max_length" , )

    def decode(self, outputs):
        """Return the label whose entailment logit (column 2) is largest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
| 653 | 1 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
# One key/value slot stored in a hash-map bucket.
# NOTE(review): ``frozen=a_ , slots=a_`` references an undefined name ``a_``;
# these dataclass flags are presumably booleans (frozen=True, slots=True) —
# confirm before fixing.
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
    """A single key/value item held in a hash-map bucket.

    NOTE(review): the two ``a__ = 42`` lines below look like mangled field
    declarations (presumably ``key: KEY`` and ``val: VAL``) — the surrounding
    map code reads ``.key`` and ``.val`` on these items; confirm.
    """

    a__ = 42
    a__ = 42
class A_ (_Item ):
    """Tombstone sentinel marking a bucket whose item was deleted.

    Probing must skip (not stop at) deleted slots, so the sentinel is kept
    distinct from ``None`` and evaluates falsy so ``_try_set`` can reuse it.

    FIX(review): ``__init__`` previously forwarded two undefined names
    (``lowerCAmelCase__``) to the parent constructor; a tombstone carries no
    payload, so fill the item's key/val fields with ``None``.
    """

    def __init__(self) -> None:
        # No payload — the slot only records "something was deleted here".
        super().__init__(None, None)

    def __bool__(self) -> bool:
        """Falsy so an insertion probe treats a tombstone as a free slot."""
        return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
    """Hash map using open addressing with linear probing and tombstone deletion.

    The table doubles when the load factor reaches ``capacity_factor`` and
    halves when it falls below half of it (never below the initial size).

    FIX(review): in the mangled version every attribute write was bound to a
    throwaway local, ``__init__``/``_try_set`` declared duplicate parameter
    names (a SyntaxError), and all private helpers were renamed ``_A`` so the
    dunder methods' calls (``self._is_full()``, ``self._add_item()``, ...)
    could never resolve. Restored the conventional helper names and state.
    NOTE(review): relies on a module-level ``_Item`` dataclass and a
    ``_deleted`` tombstone instance; in this file those bindings were mangled
    (``A_`` / ``__lowerCamelCase``) — confirm the module-level names.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key) -> int:
        # Home bucket for a key; probing starts here.
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear probing with wrap-around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key, val) -> bool:
        """Try to place (key, val) at bucket ``ind``.

        Returns True when the slot was free (or a tombstone — both falsy) or
        already held the same key (overwrite); False means keep probing.
        """
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        """True when the load factor has reached the growth threshold."""
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        """True when the table may shrink (never below the initial size)."""
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        """Rebuild the table at ``new_size``, re-inserting all live items."""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:  # skips both empty slots and falsy tombstones
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key):
        """Yield bucket indices along the probe sequence for ``key``."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key, val) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key, val) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                # Hit a never-used slot: the key cannot be further along.
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key):
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self):
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item )
        return f"HashMap({val_string})"
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import scaffolding for the ViT subpackage: symbols are only imported
# when first accessed, so importing the package does not eagerly pull in
# torch / TensorFlow / Flax / vision dependencies.
# FIX(review): the mangled version rebound every structure to one throwaway
# name, then passed an undefined ``_import_structure`` to ``_LazyModule`` and
# discarded the lazy module instead of installing it in ``sys.modules``.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below resolves them on first attribute access instead.
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Optional[Any] = 16
__lowerCamelCase : List[Any] = 32
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = 16 )-> Optional[int]:
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    NOTE(review): both parameters share the name ``__magic_name__`` (a
    SyntaxError — presumably ``accelerator`` and ``batch_size`` before name
    mangling), and the locals below are bound to ``snake_case_`` but read
    under their original names (``tokenizer``, ``datasets``,
    ``tokenized_datasets``, ``train_dataloader``, ...); the function cannot
    run as written. Left byte-identical pending a verified restoration.
    """
    snake_case_ : Tuple = AutoTokenizer.from_pretrained("bert-base-cased" )
    snake_case_ : List[str] = load_dataset("glue" ,"mrpc" )

    def tokenize_function(__magic_name__ ):
        # max_length=None => use the model max length (it's actually the default)
        snake_case_ : Union[str, Any] = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__magic_name__ ,max_length=__magic_name__ )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        snake_case_ : List[Any] = datasets.map(
            __magic_name__ ,batched=__magic_name__ ,remove_columns=["idx", "sentence1", "sentence2"] ,)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    snake_case_ : int = tokenized_datasets.rename_column("label" ,"labels" )

    def collate_fn(__magic_name__ ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        snake_case_ : int = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            snake_case_ : Union[str, Any] = 16
        elif accelerator.mixed_precision != "no":
            snake_case_ : Tuple = 8
        else:
            snake_case_ : Dict = None
        return tokenizer.pad(
            __magic_name__ ,padding="longest" ,max_length=__magic_name__ ,pad_to_multiple_of=__magic_name__ ,return_tensors="pt" ,)

    # Instantiate dataloaders.
    snake_case_ : List[str] = DataLoader(
        tokenized_datasets["train"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
    snake_case_ : List[Any] = DataLoader(
        tokenized_datasets["validation"] ,shuffle=__magic_name__ ,collate_fn=__magic_name__ ,batch_size=__magic_name__ )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Tuple = mocked_dataloaders # noqa: F811
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Optional[Any]:
    """Train and evaluate BERT on MRPC with LocalSGD-style parameter syncing.

    NOTE(review): the two parameters share the name ``__magic_name__`` (a
    SyntaxError — presumably ``config`` and ``args`` before name mangling),
    and every local below is bound to ``snake_case_`` but read under its
    original name (``accelerator``, ``model``, ``optimizer``, ``metric``,
    ...); the function cannot run as written. Left byte-identical pending a
    verified restoration.
    """
    if os.environ.get("TESTING_MOCKED_DATALOADERS" ,__magic_name__ ) == "1":
        snake_case_ : Tuple = 2
    # New Code #
    snake_case_ : List[Any] = int(args.gradient_accumulation_steps )
    snake_case_ : Optional[Any] = int(args.local_sgd_steps )
    # Initialize accelerator
    snake_case_ : int = Accelerator(
        cpu=args.cpu ,mixed_precision=args.mixed_precision ,gradient_accumulation_steps=__magic_name__ )
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case_ : int = config["lr"]
    snake_case_ : Optional[Any] = int(config["num_epochs"] )
    snake_case_ : int = int(config["seed"] )
    snake_case_ : List[str] = int(config["batch_size"] )
    snake_case_ : Any = evaluate.load("glue" ,"mrpc" )
    set_seed(__magic_name__ )
    snake_case_, snake_case_ : Dict = get_dataloaders(__magic_name__ ,__magic_name__ )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    snake_case_ : Any = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" ,return_dict=__magic_name__ )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    snake_case_ : Optional[Any] = model.to(accelerator.device )
    # Instantiate optimizer
    snake_case_ : int = AdamW(params=model.parameters() ,lr=__magic_name__ )
    # Instantiate scheduler
    snake_case_ : Optional[int] = get_linear_schedule_with_warmup(
        optimizer=__magic_name__ ,num_warmup_steps=100 ,num_training_steps=(len(__magic_name__ ) * num_epochs) ,)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = accelerator.prepare(
        __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
    # Now we train the model
    for epoch in range(__magic_name__ ):
        model.train()
        with LocalSGD(
            accelerator=__magic_name__ ,model=__magic_name__ ,local_sgd_steps=__magic_name__ ,enabled=local_sgd_steps is not None ) as local_sgd:
            for step, batch in enumerate(__magic_name__ ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(__magic_name__ ):
                    snake_case_ : List[Any] = model(**__magic_name__ )
                    snake_case_ : Any = output.loss
                    accelerator.backward(__magic_name__ )
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                # LocalSGD-specific line
                local_sgd.step()
        model.eval()
        for step, batch in enumerate(__magic_name__ ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                snake_case_ : Optional[Any] = model(**__magic_name__ )
            snake_case_ : int = outputs.logits.argmax(dim=-1 )
            snake_case_, snake_case_ : Tuple = accelerator.gather_for_metrics((predictions, batch["labels"]) )
            metric.add_batch(
                predictions=__magic_name__ ,references=__magic_name__ ,)
        snake_case_ : Tuple = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'''epoch {epoch}:''' ,__magic_name__ )
def __UpperCAmelCase ( )-> str:
    """CLI entry point: parse training arguments and launch the training run.

    FIX(review): the mangled version passed an undefined ``__magic_name__`` as
    every argparse ``type=``/``default=`` and as the arguments to
    ``training_function``, and bound the parse results to throwaway locals.
    NOTE(review): ``training_function`` is expected to be the training routine
    defined above (its name was mangled to ``__UpperCAmelCase``) — confirm.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script." )
    parser.add_argument(
        "--mixed_precision" ,type=str ,default=None ,choices=["no", "fp16", "bf16", "fp8"] ,help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU." ,)
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps" ,type=int ,default=1 ,help="The number of minibatches to be ran before gradients are accumulated." ,)
    parser.add_argument(
        "--local_sgd_steps" ,type=int ,default=8 ,help="Number of local SGD steps or None to disable local SGD" )
    parser.add_argument("--cpu" ,action="store_true" ,help="If passed, will train on the CPU." )
    args = parser.parse_args()
    config = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config ,args )


if __name__ == "__main__":
    # FIX(review): the guard previously called an undefined ``main()``; the
    # entry point above is (mangled-)named ``__UpperCAmelCase``, so call it
    # by its actual name.
    __UpperCAmelCase()
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Helper that builds LayoutLMv3 configs and synthetic inputs for the tests below.

    NOTE(review): throughout this class the identifier mangling declared
    duplicate parameter names (``lowerCAmelCase__`` repeated in signatures is
    a SyntaxError) and bound results to throwaway ``snake_case_`` locals that
    are later read under other names (``bbox``, ``config_and_inputs``, ...);
    the code cannot run as written. Left byte-identical pending restoration.
    """

    def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
        '''Store test hyper-parameters and derive the combined sequence length.'''
        snake_case_ : Optional[int] = parent
        snake_case_ : Union[str, Any] = batch_size
        snake_case_ : Optional[int] = num_channels
        snake_case_ : List[Any] = image_size
        snake_case_ : Optional[int] = patch_size
        snake_case_ : Union[str, Any] = text_seq_length
        snake_case_ : Dict = is_training
        snake_case_ : Optional[Any] = use_input_mask
        snake_case_ : Union[str, Any] = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : List[str] = vocab_size
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : List[str] = intermediate_size
        snake_case_ : str = hidden_act
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : Optional[int] = attention_probs_dropout_prob
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : List[Any] = type_vocab_size
        snake_case_ : Union[str, Any] = type_sequence_label_size
        snake_case_ : List[Any] = initializer_range
        snake_case_ : Union[str, Any] = coordinate_size
        snake_case_ : int = shape_size
        snake_case_ : Tuple = num_labels
        snake_case_ : List[Any] = num_choices
        snake_case_ : List[str] = scope
        snake_case_ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        snake_case_ : str = text_seq_length
        snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
        snake_case_ : str = self.text_seq_length + self.image_seq_length

    def _A ( self :Union[str, Any] ) -> Tuple:
        '''Build a config plus random (ids, bbox, pixel_values, masks, labels) inputs.'''
        snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1,
        # so swap coordinate pairs that came out of the random draw reversed.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    snake_case_ : Optional[Any] = bbox[i, j, 3]
                    snake_case_ : Any = bbox[i, j, 1]
                    snake_case_ : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    snake_case_ : str = bbox[i, j, 2]
                    snake_case_ : Dict = bbox[i, j, 0]
                    snake_case_ : Union[str, Any] = t
        snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Dict = None
        if self.use_input_mask:
            snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
        snake_case_ : Any = None
        if self.use_token_type_ids:
            snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        snake_case_ : Union[str, Any] = None
        snake_case_ : str = None
        if self.use_labels:
            snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        snake_case_ : str = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
        '''Exercise the base model on text+image, text-only, and image-only inputs.'''
        snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        # text + image
        snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
        snake_case_ : Optional[int] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        snake_case_ : List[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
        '''Check sequence-classification head output shape (batch, num_labels).'''
        snake_case_ : str = self.num_labels
        snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : Optional[int] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
        '''Check token-classification head output shape (batch, text_seq_length, num_labels).'''
        snake_case_ : Optional[int] = self.num_labels
        snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : List[Any] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
        '''Check question-answering head start/end logits shapes (batch, seq_length).'''
        snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : List[Any] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _A ( self :int ) -> Union[str, Any]:
        '''Repackage prepare_config_and_inputs() output as (config, kwargs-dict).'''
        snake_case_ : Dict = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ),
        ) : Optional[Any] = config_and_inputs
        snake_case_ : Tuple = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    """Model/pipeline test suite for LayoutLMv3 (PyTorch).

    NOTE(review): several methods below bind results to throwaway
    ``snake_case_`` locals that are then read under other names
    (``config_and_inputs``, ``config``, ``inputs_dict``), and the repeated
    ``a__`` attributes overwrite each other — mangled names; left
    byte-identical pending restoration.
    """

    a__ = False
    a__ = False
    a__ = False
    a__ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
        '''Pipeline-test gate: all pipeline tests are allowed to run.'''
        return True

    def _A ( self :List[Any] ) -> str:
        '''Create the model tester and the config tester used by the cases below.'''
        snake_case_ : Tuple = LayoutLMvaModelTester(self )
        snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )

    def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
        '''Adapt shared inputs for a given model class, optionally adding dummy labels.'''
        snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
        if model_class in get_values(lowerCAmelCase__ ):
            # Multiple-choice models expect an extra num_choices dimension.
            snake_case_ : Optional[Any] = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowerCAmelCase__ ):
                snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in get_values(lowerCAmelCase__ ):
                # Question answering: zero-valued start/end positions.
                snake_case_ : List[Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
                snake_case_ : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in [
                *get_values(lowerCAmelCase__ ),
            ]:
                snake_case_ : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in [
                *get_values(lowerCAmelCase__ ),
            ]:
                # Token classification: one label per text token.
                snake_case_ : List[str] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
        return inputs_dict

    def _A ( self :Any ) -> Any:
        '''Run the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    def _A ( self :int ) -> int:
        '''Test the base model forward pass.'''
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :Any ) -> Dict:
        '''Test the base model with each position-embedding type.'''
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case_ : int = type
            self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :int ) -> str:
        '''Test the sequence-classification head.'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )

    def _A ( self :List[Any] ) -> Optional[Any]:
        '''Test the token-classification head.'''
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )

    def _A ( self :int ) -> Union[str, Any]:
        '''Test the question-answering head.'''
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )

    @slow
    def _A ( self :Tuple ) -> List[Any]:
        '''Smoke-test loading a pretrained checkpoint from the hub.'''
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> List[str]:
    """Load the COCO fixture image used by the integration test below.

    FIX(review): the opened image was bound to a throwaway local while the
    function returned an undefined name ``image``; bind and return it.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class A_ (unittest.TestCase ):
    """Integration test running a real microsoft/layoutlmv3-base forward pass.

    NOTE(review): locals below are bound to throwaway ``snake_case_`` names
    but read as ``image_processor``, ``input_ids``, ``bbox``, ``outputs``,
    ``expected_shape``, ``expected_slice`` — mangled; left byte-identical.
    """

    @cached_property
    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        '''Image processor fixture (OCR disabled); None when vision deps are absent.'''
        return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None

    @slow
    def _A ( self :Union[str, Any] ) -> Union[str, Any]:
        '''Forward the fixture image through the pretrained base model and
        compare output shape and a 3x3 logits slice against recorded values.'''
        snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = self.default_image_processor
        snake_case_ : Optional[int] = prepare_img()
        snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
        snake_case_ : List[str] = torch.tensor([[1, 2]] )
        snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        snake_case_ : Any = model(
            input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
        # verify the logits
        snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
        snake_case_ : str = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

# Name of the SentencePiece model file inside a saved tokenizer directory.
# The tokenizer class below references these exact constant names
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...); the original bound all
# of them to a single throwaway variable, leaving every reference undefined.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class A_ (PreTrainedTokenizer ):
    """SentencePiece-based T5 tokenizer.

    Supports the ``<extra_id_*>`` sentinel tokens used by T5's span-corruption
    pretraining objective: the sentinels are appended *after* the SentencePiece
    vocabulary, in reverse order (``<extra_id_0>`` is the last id).

    The original block was unrunnable: every method signature reused one
    parameter name (a SyntaxError), all methods were named ``_A`` (so later
    defs shadowed earlier ones), and the four framework-required class
    attributes were collapsed into a single name. This restores the names the
    bodies and call sites already reference.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ) -> None:
        """Load the SentencePiece model and register the sentinel tokens.

        Raises:
            ValueError: if `additional_special_tokens` is given but does not
                contain exactly `extra_ids` sentinel tokens.
        """
        # Add the <extra_id_*> sentinels unless the caller already supplied them.
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                F'''You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'''
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        """Resolve the historical 512 max-length default, warning when relied upon."""
        if pretrained_model_name_or_path in A_.max_model_input_sizes:
            deprecated_max_model_length = A_.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length

    @property
    def vocab_size(self):
        """SentencePiece vocab size plus the appended sentinel ids."""
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a 0/1 mask marking special tokens (each sequence gets one trailing EOS)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        """All registered <extra_id_N> sentinel token strings."""
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        """Ids of the sentinel tokens."""
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Append the EOS id unless the sequence already ends with it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """T5 does not use token types: return all zeros sized for ids + EOS markers."""
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Append EOS to each sequence and concatenate the (optional) pair."""
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        """In non-legacy mode, prefix with the SentencePiece underline so leading
        whitespace after a special token is handled correctly."""
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        """Run SentencePiece; in non-legacy mode strip the artificial leading underline."""
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Sentinels map to the top of the id space, in reverse order."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Inverse of _convert_token_to_id."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = F'''<extra_id_{self.vocab_size - 1 - index}>'''
        return token

    def convert_tokens_to_string(self, tokens):
        """Decode pieces with SentencePiece, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or re-serialize) the SentencePiece model file into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file to copy (e.g. loaded from serialized proto): write the proto out.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_splitting() -> None:
    """The spark backend must register itself; unsupported backend names must raise.

    Renamed with a ``test_`` prefix so pytest actually collects it, and the
    undefined names inside the body are replaced with a local helper and an
    explicit exception type.
    """
    def _add_one(i):
        return i + 1

    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    # An unknown backend name is rejected when entering the context, so the
    # ValueError fires before map_nested does any work, regardless of num_proc.
    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend" ):
            map_nested(_add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend("unsupported backend" ):
            map_nested(_add_one, lst, num_proc=-1)
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def test_parallel_backend_map_nested(num_proc) -> None:
    """map_nested must apply the function to every leaf of nested lists/dicts
    under the spark joblib backend.

    Fixes two defects: the parameter is now named ``num_proc`` to match the
    ``parametrize`` argnames (pytest errors out otherwise), and the previously
    undefined expected-value names are defined before use.
    """
    def _add_one(i):
        # NOTE(review): defined locally so this edit is self-contained; assumes
        # the spark joblib backend serializes closures (cloudpickle) — confirm.
        return i + 1

    sa = [1, 2]
    sb = {"a": 1, "b": 2}
    sc = {"a": [1, 2], "b": [3, 4]}
    sd = {"a": {"1": 1}, "b": 2}
    se = {"a": 1, "b": 2, "c": 3, "d": 4}
    expected_map_nested_sa = [2, 3]
    expected_map_nested_sb = {"a": 2, "b": 3}
    expected_map_nested_sc = {"a": [2, 3], "b": [4, 5]}
    expected_map_nested_sd = {"a": {"1": 2}, "b": 3}
    expected_map_nested_se = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(_add_one, sa, num_proc=num_proc) == expected_map_nested_sa
        assert map_nested(_add_one, sb, num_proc=num_proc) == expected_map_nested_sb
        assert map_nested(_add_one, sc, num_proc=num_proc) == expected_map_nested_sc
        assert map_nested(_add_one, sd, num_proc=num_proc) == expected_map_nested_sd
        assert map_nested(_add_one, se, num_proc=num_proc) == expected_map_nested_se
| 653 | 1 |
"""Lazy import scaffolding for the Longformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule to the public names it exports. `_LazyModule` consumes
# this below; the original bound it (and every backend-specific export list)
# to a throwaway variable, so `_import_structure` was undefined at use and the
# lazy module was never installed.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Optional backends register their submodules only when the dependency is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the optional heavy
    # backends above are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The config class below references `logger`; the original bound it to a
# throwaway name, leaving `logger` undefined.
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (PretrainedConfig ):
    """Configuration for ESM protein language models (and ESMFold folding models).

    Restores the parameter/attribute names the body already referenced: the
    original reused one parameter name for every argument (a SyntaxError) and
    assigned every attribute to the same local, so nothing was ever stored on
    `self`.
    """

    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        """Store model hyperparameters; for folding models, normalize the nested
        ESMFold config (dict or None are accepted) and the vocab list."""
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )

    def to_dict(self):
        """Serialize to a dict, recursing into the nested ESMFold config when present."""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """Sub-configuration for the ESMFold folding head.

    The original declared every field under one name (so the dataclass had a
    single field) and named both methods `_A`, which meant `__post_init__`
    never ran. `use_esm_attn_map` and `trunk` are proven by call sites in this
    file; the remaining field names follow the upstream ESMFold config —
    NOTE(review): confirm against the reference implementation.
    """

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        # Accept a plain dict (e.g. from deserialized JSON) or nothing at all.
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """Serialize, recursing into the nested trunk config."""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """Sub-configuration for the ESMFold trunk.

    Restores distinct field names (the original collapsed all twelve into one)
    and the `__post_init__`/`to_dict` method names that sibling code calls.
    """

    num_blocks: int = 48
    sequence_state_dim: int = 1_024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): the next two checks compare a value with itself, so they
        # can never fail. Kept for behavioral parity; the intended divisor was
        # presumably the corresponding head width — confirm before changing.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )

        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )

    def to_dict(self):
        """Serialize, recursing into the nested structure-module config."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Hyperparameters of the ESMFold structure module.

    Restores distinct field names (the original collapsed all fifteen into one
    name, leaving a single field) and the `to_dict` name that TrunkConfig calls.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """Serialize all fields to a plain dict."""
        return asdict(self)
def get_default_vocab_list():
    """Return the default ESM-2 token vocabulary as a tuple of strings.

    Renamed from a mangled placeholder: the ESM config above calls
    `get_default_vocab_list()`, which was otherwise undefined. The wrong
    `-> int` annotation is dropped (the function returns a tuple of str).
    """
    return (
        "<cls>", "<pad>", "<eos>", "<unk>",
        "L", "A", "G", "V", "S", "E", "R", "T", "I", "D",
        "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C",
        "X", "B", "U", "Z", "O",
        ".", "-", "<null_1>", "<mask>",
    )
| 653 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A_ (unittest.TestCase ):
    """Tests for the summarization preprocessing helpers in utils_summarization.

    Fixes the mangled original: `setUp` was misnamed (so `self.block_size` was
    never set), no method carried a `test_` prefix (unittest never ran any of
    them), and several locals were referenced without being defined.
    """

    def setUp(self):
        # Block size used by truncate_or_pad in the tests below.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Sequences shorter than the block size are right-padded with the pad value."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences of exactly the block size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than the block size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields no summary lines."""
        raw_story = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields neither story nor summary lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """Sentences lacking a final period get one added; highlights become summary lines."""
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        """Token type flips after each separator token within each row."""
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 653 |
"""Lazy import scaffolding for the Longformer model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Maps each submodule to the public names it exports. `_LazyModule` consumes
# this below; the original bound it (and every backend-specific export list)
# to a throwaway variable, so `_import_structure` was undefined at use and the
# lazy module was never installed.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

# Optional backends register their submodules only when the dependency is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy so the optional heavy
    # backends above are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# Silence TensorFlow's C++ logging before TF is (possibly) imported below.
# The original bound "3" to a throwaway variable, which left the `os` import
# unused and the log level unset.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

print("Python version:", sys.version)
print("transformers version:", transformers.__version__)

try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
'''simple docstring'''
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Absolute tolerance for floating-point comparisons in these tests.
__lowerCamelCase : Optional[int] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class A_ :
"""simple docstring"""
def __init__( self :Tuple , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any]=16 , lowerCAmelCase__ :Any=13 , lowerCAmelCase__ :Optional[Any]=7 , lowerCAmelCase__ :str=14 , lowerCAmelCase__ :Union[str, Any]=10 , lowerCAmelCase__ :Tuple=19 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :str=2 , lowerCAmelCase__ :List[Any]=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Tuple=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=[1, 2, 3, 4, 5] , lowerCAmelCase__ :str=25 , lowerCAmelCase__ :Optional[Any]=5 , ) -> Dict:
'''simple docstring'''
snake_case_ : List[str] = d_model
snake_case_ : Dict = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : Optional[Any] = prediction_length
snake_case_ : str = context_length
snake_case_ : Tuple = cardinality
snake_case_ : List[str] = num_time_features
snake_case_ : Optional[Any] = lags_sequence
snake_case_ : Union[str, Any] = embedding_dimension
snake_case_ : Optional[Any] = is_training
snake_case_ : Optional[Any] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Optional[Any] = num_attention_heads
snake_case_ : int = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : Union[str, Any] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : List[str] = context_length
snake_case_ : Any = prediction_length + label_length
snake_case_ : Union[str, Any] = label_length
snake_case_ : List[Any] = moving_average
snake_case_ : str = autocorrelation_factor
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :Optional[Any] ) -> Dict:
    """Build a dict of random encoder/decoder inputs matching the given config.

    NOTE(review): mechanically renamed code — every result is assigned to
    ``snake_case_`` while the dict at the end reads ``past_values``,
    ``static_categorical_features``, ... and ``config``/``_past_length`` are
    read without being bound from the (renamed) parameter. Restore the real
    local names before running.
    """
    # Past window must cover the context plus the largest requested lag.
    snake_case_ : Any = config.context_length + max(config.lags_sequence )
    snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
    snake_case_ : Optional[int] = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
    snake_case_ : List[Any] = floats_tensor([self.batch_size, _past_length] )
    # Random boolean observation mask (~50% observed).
    snake_case_ : Dict = floats_tensor([self.batch_size, _past_length] ) > 0.5
    # decoder inputs
    snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
    snake_case_ : List[Any] = floats_tensor([self.batch_size, config.prediction_length] )
    snake_case_ : int = {
        "past_values": past_values,
        "static_categorical_features": static_categorical_features,
        "past_time_features": past_time_features,
        "past_observed_mask": past_observed_mask,
        "future_time_features": future_time_features,
        "future_values": future_values,
    }
    return inputs_dict
def _A ( self :Dict ) -> Tuple:
    """Return a ``(config, inputs_dict)`` pair for the common tests.

    NOTE(review): both results are bound to ``snake_case_`` while the code
    reads ``config``/``inputs_dict`` and passes an unbound ``lowerCAmelCase__``
    — mechanically renamed; raises NameError as written.
    """
    snake_case_ : str = self.get_config()
    snake_case_ : int = self.prepare_autoformer_inputs_dict(lowerCAmelCase__ )
    return config, inputs_dict
def _A ( self :Optional[int] ) -> Dict:
    """Alias used by the common test-suite; delegates to ``prepare_config_and_inputs``.

    NOTE(review): the tuple is unpacked into ``snake_case_`` twice while the
    ``return`` reads ``config``/``inputs_dict`` — same renaming damage as above.
    """
    snake_case_, snake_case_ : Union[str, Any] = self.prepare_config_and_inputs()
    return config, inputs_dict
def _A ( self :Tuple , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[int] ) -> List[str]:
    """Check that the standalone encoder/decoder reproduce the full model's hidden states.

    The full model is run once, then its encoder and decoder are round-tripped
    through ``save_pretrained``/``from_pretrained`` and fed hand-assembled
    inputs (seasonal/trend decomposition plus time features); outputs must
    agree with the full model to within 1e-3.

    NOTE(review): mechanically renamed — the signature repeats
    ``lowerCAmelCase__`` (SyntaxError) and locals are bound to ``snake_case_``
    but read as ``model``, ``encoder``, ``transformer_inputs``, ``feature``,
    ``seasonal_input``, ``trend_input``, ``enc_input``, ``zeros``, ``mean``,
    ``decoder`` etc. Restore the real names before running.
    """
    snake_case_ : Dict = AutoformerModel(config=lowerCAmelCase__ ).to(lowerCAmelCase__ ).eval()
    snake_case_ : Optional[int] = model(**lowerCAmelCase__ )
    snake_case_ : Any = outputs.encoder_last_hidden_state
    snake_case_ : Dict = outputs.last_hidden_state
    # Round-trip the encoder through disk and re-run it standalone.
    with tempfile.TemporaryDirectory() as tmpdirname:
        snake_case_ : Optional[Any] = model.get_encoder()
        encoder.save_pretrained(lowerCAmelCase__ )
        snake_case_ : Tuple = AutoformerEncoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
    snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ : List[str] = model.create_network_inputs(**lowerCAmelCase__ )
    snake_case_, snake_case_ : Optional[int] = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
    # Encoder input: context-window values concatenated with their features.
    snake_case_ : List[Any] = torch.cat(
        (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
    snake_case_ : Optional[int] = encoder(inputs_embeds=lowerCAmelCase__ )[0]
    self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
    # Trend initialization: context mean repeated over the prediction horizon.
    snake_case_ : Any = (
        torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
        .unsqueeze(1 )
        .repeat(1 , config.prediction_length , 1 )
    )
    snake_case_ : List[str] = torch.zeros(
        [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
    # Decoder seasonal input: label-window seasonal component padded with zeros.
    snake_case_ : Optional[Any] = torch.cat(
        (
            torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
            feature[:, config.context_length - config.label_length :, ...],
        ) , dim=-1 , )
    # Decoder trend input: label-window trend component padded with the mean.
    snake_case_ : Any = torch.cat(
        (
            torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
            feature[:, config.context_length - config.label_length :, ...],
        ) , dim=-1 , )
    # Round-trip the decoder through disk and re-run it standalone.
    with tempfile.TemporaryDirectory() as tmpdirname:
        snake_case_ : List[Any] = model.get_decoder()
        decoder.save_pretrained(lowerCAmelCase__ )
        snake_case_ : int = AutoformerDecoder.from_pretrained(lowerCAmelCase__ ).to(lowerCAmelCase__ )
    snake_case_ : Tuple = decoder(
        trend=lowerCAmelCase__ , inputs_embeds=lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ , )[0]
    self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class A_ (a_ , unittest.TestCase ):
    """Common model tests for Autoformer (``AutoformerModel`` / ``AutoformerForPrediction``).

    NOTE(review): throughout this class locals are bound to the throwaway name
    ``snake_case_`` and then read under their real names (``config``,
    ``inputs_dict``, ``model``, ``outputs``, ``attentions``, ...), and many
    call arguments are the unbound ``lowerCAmelCase__`` — mechanically renamed
    code that raises NameError as written. The same applies to the repeated
    ``a__`` class attributes, which were presumably distinct names
    (``all_model_classes``, ``all_generative_model_classes``, ...) and now
    overwrite each other.
    """

    a__ = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    a__ = (AutoformerForPrediction,) if is_torch_available() else ()
    a__ = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False
    a__ = False

    def _A ( self :Dict ) -> int:
        """Build the model tester and config tester used by the tests below."""
        snake_case_ : Tuple = AutoformerModelTester(self )
        snake_case_ : str = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )

    def _A ( self :List[str] ) -> Tuple:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def _A ( self :List[Any] ) -> Union[str, Any]:
        """Save/reload every model class and assert no weights go missing."""
        snake_case_, snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case_ : List[Any] = model_class(lowerCAmelCase__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCAmelCase__ )
                snake_case_, snake_case_ : str = model_class.from_pretrained(lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
            self.assertEqual(info["missing_keys"] , [] )

    def _A ( self :Optional[int] ) -> Tuple:
        """Delegate the standalone encoder/decoder equivalence check to the tester."""
        snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*lowerCAmelCase__ )

    @unittest.skip(reason="Model has no tokens embeddings" )
    def _A ( self :str ) -> str:
        """Skipped: time-series model has no token embeddings to resize."""
        pass

    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        """Check that ``main_input_name`` matches the first forward argument."""
        snake_case_ : List[Any] = inspect.signature(getattr(lowerCAmelCase__ , "forward" ) )
        # The main input is the name of the argument after `self`
        snake_case_ : Dict = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , lowerCAmelCase__ )

    def _A ( self :Optional[Any] ) -> Optional[int]:
        """Verify the expected forward-signature argument names and order."""
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case_ : Tuple = model_class(lowerCAmelCase__ )
            snake_case_ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case_ : Optional[Any] = [*signature.parameters.keys()]
            snake_case_ : Dict = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask" )
            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ] )
            self.assertListEqual(arg_names[: len(lowerCAmelCase__ )] , lowerCAmelCase__ )

    def _A ( self :int ) -> Any:
        """Check encoder/decoder/cross attention shapes and output tuple lengths."""
        snake_case_, snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : Union[str, Any] = True
        snake_case_ : List[str] = getattr(self.model_tester , "seq_length" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "decoder_seq_length" , lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "d_model" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "num_attention_heads" , lowerCAmelCase__ )
        # Per-head dimension used in the expected attention shapes below.
        snake_case_ : Optional[int] = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            snake_case_ : Any = True
            snake_case_ : Any = False
            snake_case_ : Dict = True
            snake_case_ : List[str] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : Tuple = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : Union[str, Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            snake_case_ : Optional[int] = True
            snake_case_ : Any = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : List[str] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : str = outputs.encoder_attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            snake_case_ : Tuple = len(lowerCAmelCase__ )
            # Base output length; optional entries below extend it.
            snake_case_ : List[str] = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
            # decoder attentions
            snake_case_ : Optional[int] = outputs.decoder_attentions
            self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            snake_case_ : List[Any] = outputs.cross_attentions
            self.assertIsInstance(lowerCAmelCase__ , (list, tuple) )
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            snake_case_ : Optional[int] = True
            snake_case_ : List[Any] = True
            snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
            model.to(lowerCAmelCase__ )
            model.eval()
            with torch.no_grad():
                snake_case_ : List[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            self.assertEqual(out_len + 2 , len(lowerCAmelCase__ ) )
            snake_case_ : Tuple = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )

    @is_flaky()
    def _A ( self :Any ) -> Optional[Any]:
        """Flaky upstream test; re-run the common gradient-retention check."""
        super().test_retain_grad_hidden_states_attentions()
def __UpperCAmelCase ( __magic_name__="train-batch.pt" ):
    """Fetch a pre-serialized test batch from the HF Hub and deserialize it.

    Args:
        __magic_name__: filename of the file inside the
            ``hf-internal-testing/tourism-monthly-batch`` dataset repo.

    Returns:
        The object stored in the file (a dict of tensors for these tests).
    """
    # Fix: load the *downloaded local path*, not the bare filename, and bind the
    # result to ``batch`` (the original returned the unbound name ``batch`` and
    # passed the filename as ``map_location``).
    file_path = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch" , filename=__magic_name__ , repo_type="dataset" )
    # NOTE(review): "cpu" is the safe CI default for map_location — confirm
    # whether the original intended the test-device variable instead.
    batch = torch.load(file_path , map_location="cpu" )
    return batch
@require_torch
@slow
class A_ (unittest.TestCase ):
    """Slow integration tests against the ``huggingface/autoformer-tourism-monthly`` checkpoint.

    NOTE(review): locals are bound to ``snake_case_`` but read under their real
    names (``model``, ``batch``, ``output``, ``outputs``, ...), and the device /
    tolerance arguments are the unbound ``lowerCAmelCase__`` — mechanically
    renamed code; restore the real names (presumably ``torch_device`` and a
    tolerance constant) before running.
    """

    def _A ( self :str ) -> Any:
        """Forward pass of ``AutoformerModel``; checks output shape and a value slice."""
        snake_case_ : Optional[int] = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : List[str] = prepare_batch()
        with torch.no_grad():
            snake_case_ : int = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , future_values=batch["future_values"] , future_time_features=batch["future_time_features"] , )[0]
        snake_case_ : Optional[int] = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : Optional[Any] = torch.tensor(
            [[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=lowerCAmelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )

    def _A ( self :Any ) -> str:
        """Encoder-only forward of ``AutoformerForPrediction``; checks the encoder output."""
        snake_case_ : str = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            snake_case_ : Tuple = model(
                past_values=batch["past_values"] , past_time_features=batch["past_time_features"] , past_observed_mask=batch["past_observed_mask"] , static_categorical_features=batch["static_categorical_features"] , ).encoder_last_hidden_state
        snake_case_ : Dict = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : Any = torch.tensor(
            [[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=lowerCAmelCase__ )
        self.assertTrue(torch.allclose(output[0, :3, :3] , lowerCAmelCase__ , atol=lowerCAmelCase__ ) )

    def _A ( self :List[str] ) -> Any:
        """``generate`` smoke test; checks sequence shape and the mean prediction tail."""
        snake_case_ : List[Any] = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly" ).to(lowerCAmelCase__ )
        snake_case_ : str = prepare_batch("val-batch.pt" )
        with torch.no_grad():
            snake_case_ : Optional[Any] = model.generate(
                static_categorical_features=batch["static_categorical_features"] , past_time_features=batch["past_time_features"] , past_values=batch["past_values"] , future_time_features=batch["future_time_features"] , past_observed_mask=batch["past_observed_mask"] , )
        snake_case_ : List[Any] = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , lowerCAmelCase__ )
        snake_case_ : Dict = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , lowerCAmelCase__ , rtol=1E-1 ) )
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
snake_case_ : Dict = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
    """Tokenization tests for RoBERTa, exercising both slow and fast tokenizers.

    NOTE(review): as elsewhere in this file, locals are bound to the throwaway
    name ``snake_case_`` and then read under their real names (``tokenizer``,
    ``tokens``, ``encoded``, ``text_of_1_token``, ...), and many call
    arguments are the unbound ``lowerCAmelCase__`` — mechanically renamed code
    that raises NameError as written.
    """

    a__ = RobertaTokenizer
    a__ = RobertaTokenizerFast
    a__ = True
    a__ = {'''cls_token''': '''<s>'''}

    def _A ( self :Optional[int] ) -> List[Any]:
        """Write a toy BPE vocab/merges pair into the test temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case_ : List[Any] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
        snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        snake_case_ : int = {"unk_token": "<unk>"}
        snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowerCAmelCase__ ) )

    def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
        """Build a slow tokenizer from the temp dir with the test special tokens."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )

    def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
        """Build a fast tokenizer from the temp dir with the test special tokens."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )

    def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
        """Return the (input, expected-output) text pair used by common tests."""
        snake_case_ : int = "lower newer"
        snake_case_ : Tuple = "lower newer"
        return input_text, output_text

    def _A ( self :Tuple ) -> Union[str, Any]:
        """Tokenize with the toy vocab and check tokens and ids."""
        snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case_ : Dict = "lower newer"
        snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ )  # , add_prefix_space=True)
        self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokens + [tokenizer.unk_token]
        snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )

    def _A ( self :Any ) -> str:
        """Spot-check known encodings of two reference sentences."""
        snake_case_ : List[str] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=lowerCAmelCase__ ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )

    @slow
    def _A ( self :str ) -> List[str]:
        """Check ``build_inputs_with_special_tokens`` against direct encoding."""
        snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base" )
        snake_case_ : List[str] = tokenizer.encode("sequence builders" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(
            "sequence builders" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = tokenizer.encode(
            "sequence builders" , "multi-sequence build" , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ )
        snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__ , lowerCAmelCase__ )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def _A ( self :List[Any] ) -> Any:
        """Exercise prefix-space handling and space behavior around special tokens."""
        snake_case_ : Optional[Any] = self.get_tokenizer()
        snake_case_ : Tuple = "Encode this sequence."
        snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
        # Testing encoder arguments
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
        snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        tokenizer.add_special_tokens({"bos_token": "<s>"} )
        snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        # Testing spaces after special tokens
        snake_case_ : List[Any] = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} )  # mask token has a left space
        snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
        snake_case_ : List[str] = "Encode <mask> sequence"
        snake_case_ : List[Any] = "Encode <mask>sequence"
        snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : int = encoded.index(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
        snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
        snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )

    def _A ( self :Tuple ) -> Tuple:
        """Intentionally empty override of the common pretokenized-inputs test."""
        pass

    def _A ( self :int ) -> Optional[Any]:
        """Compare slow vs fast tokenizers on a sentence containing ``<mask>``."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : List[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
                snake_case_ : Any = "A, <mask> AllenNLP sentence."
                snake_case_ : str = tokenizer_r.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                snake_case_ : int = tokenizer_p.encode_plus(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                snake_case_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                snake_case_ : str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowerCAmelCase__ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )

    def _A ( self :int ) -> Tuple:
        """Check that saved fast-tokenizer state round-trips the two flags."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            snake_case_ : str = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
            snake_case_ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            snake_case_ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["add_prefix_space"] , lowerCAmelCase__ )
            self.assertEqual(post_processor_state["trim_offsets"] , lowerCAmelCase__ )

    def _A ( self :List[str] ) -> List[Any]:
        """Check offset mappings under all add_prefix_space/trim_offsets combinations."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                snake_case_ : str = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                snake_case_ : Tuple = F'''{text_of_1_token} {text_of_1_token}'''
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : List[str] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Tuple = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : str = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase__ ), len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                # Same checks, now with a leading space on the text.
                snake_case_ : Tuple = F''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                snake_case_ : Dict = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Any = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
                snake_case_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase__ , use_fast=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , trim_offsets=lowerCAmelCase__ )
                snake_case_ : Optional[int] = tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowerCAmelCase__ )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ), 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
| 653 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Import structure consumed by `_LazyModule` below: maps submodule name to the
# public names it exports.
__lowerCamelCase : str = {
    '''configuration_rag''': ['''RagConfig'''],
    '''retrieval_rag''': ['''RagRetriever'''],
    '''tokenization_rag''': ['''RagTokenizer'''],
}

# Register the PyTorch exports only when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : int = [
        '''RagModel''',
        '''RagPreTrainedModel''',
        '''RagSequenceForGeneration''',
        '''RagTokenForGeneration''',
    ]

# Same pattern for the TensorFlow implementations.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = [
        '''TFRagModel''',
        '''TFRagPreTrainedModel''',
        '''TFRagSequenceForGeneration''',
        '''TFRagTokenForGeneration''',
    ]

# At type-checking time import the real symbols eagerly so static analyzers
# can see them; at runtime install a lazy module instead.
if TYPE_CHECKING:
    from .configuration_rag import RagConfig
    from .retrieval_rag import RagRetriever
    from .tokenization_rag import RagTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rag import (
            TFRagModel,
            TFRagPreTrainedModel,
            TFRagSequenceForGeneration,
            TFRagTokenForGeneration,
        )
else:
    import sys

    # NOTE(review): the obfuscation collapsed the original distinct names
    # (`_import_structure` plus the two backend lists) into `__lowerCamelCase`,
    # so the optional lists above overwrite each other instead of being added
    # to the structure, and `_import_structure` here is unbound. Restore the
    # original names for this module to import correctly.
    __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 653 |
'''simple docstring'''
import math
def __UpperCAmelCase ( __magic_name__ )-> bool:
"""simple docstring"""
snake_case_ : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(__magic_name__ )
def __UpperCAmelCase ( __magic_name__ = 1 / 1_2345 )-> int:
"""simple docstring"""
snake_case_ : Any = 0
snake_case_ : int = 0
snake_case_ : Union[str, Any] = 3
while True:
snake_case_ : Any = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(__magic_name__ ):
snake_case_ : Optional[Any] = int(__magic_name__ )
total_partitions += 1
if check_partition_perfect(__magic_name__ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(__magic_name__ )
integer += 1
if __name__ == "__main__":
    # NOTE(review): ``solution`` is unbound here — the function above was
    # renamed to ``__UpperCAmelCase`` — so running this module raises NameError.
    print(f'''{solution() = }''')
| 653 | 1 |
'''simple docstring'''
import requests
def __UpperCAmelCase ( message_body , slack_url ) -> None:
    """Post ``message_body`` to a Slack incoming-webhook URL.

    Fix: the obfuscated signature declared the same parameter twice (a
    SyntaxError) and the body read unbound names (``message_body``,
    ``response``); parameters are restored to the names the body and the
    ``__main__`` call site use, in that order.

    Args:
        message_body: plain-text message to send (Slack ``text`` field).
        slack_url: the incoming-webhook URL provided by Slack.

    Raises:
        ValueError: if Slack responds with a non-200 status code.
    """
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url , json={"text": message_body} , headers=headers )
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(error_message )
if __name__ == "__main__":
    # Set the slack url to the one provided by Slack when you create the webhook at
    # https://my.slack.com/services/new/incoming-webhook/
    # NOTE(review): ``send_slack_message`` is unbound — the function above was
    # renamed to ``__UpperCAmelCase`` — so running this module raises NameError.
    send_slack_message('''<YOUR MESSAGE BODY>''', '''<SLACK CHANNEL URL>''')
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class A_ :
    """Forward-hook tracer: records the leaf modules executed by one forward pass.

    NOTE(review): mechanically renamed code — the repeated ``a__`` fields were
    presumably ``module``/``traced``/``handles``, the hook result is bound to
    ``snake_case_`` but read as ``has_not_submodules``, and ``nn.Convad`` /
    ``nn.BatchNormad`` look like digit-mangled ``nn.Conv2d`` /
    ``nn.BatchNorm2d``. Restore the real names before running.
    """

    a__ = 42
    a__ = field(default_factory=a_ )
    a__ = field(default_factory=a_ )

    def _A ( self :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tensor , lowerCAmelCase__ :Tensor ) -> int:
        """Forward hook: record a module if it is a leaf (or conv/batchnorm)."""
        snake_case_ : int = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
        if has_not_submodules:
            self.traced.append(lowerCAmelCase__ )

    def __call__( self :List[Any] , lowerCAmelCase__ :Tensor ) -> Union[str, Any]:
        """Register hooks on all submodules, run one forward pass, then clean up."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(lowerCAmelCase__ )
        # Remove every hook so the module is left unmodified.
        [x.remove() for x in self.handles]
        return self

    @property
    def _A ( self :int ) -> List[Any]:
        """Traced modules that actually carry parameters (non-empty state dict)."""
        return list(filter(lambda lowerCAmelCase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class A_ :
    """Copy weights from `src` to `dest` by tracing both models on the same
    input and zipping their parametrized leaf modules in execution order.

    NOTE(review): relies on a `Tracker` helper (the dataclass defined just
    above, whose name was mangled in this file) — confirm the binding exists
    before running. Dataclass fields are reconstructed from the attribute
    reads in `__call__`.
    """

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor) -> None:
        """Transfer every matching weight; raise when layer counts differ."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        # Drop layer types the caller asked to ignore on either side.
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                F'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                F''' destination module has {len(dest_traced)}.''' )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(F'''Transfered from={src_m} to={dest_m}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ = True )-> Optional[int]:
    """Convert one timm ResNet checkpoint into a HF `ResNetForImageClassification`,
    verify the logits match, and optionally push model + processor to the Hub.

    NOTE(review): the parameter list was mangled into four identical names
    (a SyntaxError — duplicate arguments). The body reads `name`,
    `save_directory`, `checkpoint_name` and `push_to_hub`, and the locals
    (`from_model`, `our_model`, `module_transfer`, `image_processor`) were
    mangled to `snake_case_` — restore the real identifiers before running.
    """
    print(F'''Converting {name}...''' )
    with torch.no_grad():
        # Source: pretrained timm model; destination: fresh HF model.
        snake_case_ : List[str] = timm.create_model(__magic_name__ ,pretrained=__magic_name__ ).eval()
        snake_case_ : Optional[int] = ResNetForImageClassification(__magic_name__ ).eval()
        snake_case_ : Dict = ModuleTransfer(src=__magic_name__ ,dest=__magic_name__ )
        # Trace both models on one dummy ImageNet-sized batch to copy weights.
        snake_case_ : Optional[int] = torch.randn((1, 3, 224, 224) )
        module_transfer(__magic_name__ )
    # Sanity check: converted model must reproduce the original logits.
    assert torch.allclose(from_model(__magic_name__ ) ,our_model(__magic_name__ ).logits ), "The model logits don't match the original one."
    snake_case_ : str = F'''resnet{'-'.join(name.split('resnet' ) )}'''
    print(__magic_name__ )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add model" ,use_temp_dir=__magic_name__ ,)
        # we can use the convnext one
        snake_case_ : Optional[Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name ,commit_message="Add image processor" ,use_temp_dir=__magic_name__ ,)
        print(F'''Pushed {checkpoint_name}''' )
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ = None ,__magic_name__ = True )-> Tuple:
    """Build ImageNet-1k ResNet configs and convert either one named checkpoint
    or all supported ones (resnet18/26/34/50/101/152).

    NOTE(review): parameters were mangled (body reads `model_name` and
    implicitly a save directory / push flag), and the partial built from the
    label maps was presumably meant to be bound as `ImageNetPreTrainedConfig`
    (used in the dict below) — restore real identifiers before running.
    """
    # File/dataset describing the 1000 ImageNet class labels.
    snake_case_ : List[str] = "imagenet-1k-id2label.json"
    snake_case_ : Optional[Any] = 1000
    snake_case_ : List[Any] = (1, num_labels)
    snake_case_ : Optional[Any] = "huggingface/label-files"
    snake_case_ : Dict = num_labels
    snake_case_ : List[Any] = json.load(open(hf_hub_download(__magic_name__ ,__magic_name__ ,repo_type="dataset" ) ,"r" ) )
    snake_case_ : List[str] = {int(__magic_name__ ): v for k, v in idalabel.items()}
    snake_case_ : Any = idalabel
    snake_case_ : List[Any] = {v: k for k, v in idalabel.items()}
    # Partial config constructor with the label maps pre-filled.
    snake_case_ : Optional[int] = partial(__magic_name__ ,num_labels=__magic_name__ ,idalabel=__magic_name__ ,labelaid=__magic_name__ )
    snake_case_ : Optional[int] = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] ,hidden_sizes=[64, 128, 256, 512] ,layer_type="basic" ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3] ,hidden_sizes=[256, 512, 1024, 2048] ,layer_type="bottleneck" ),
    }
    if model_name:
        convert_weight_and_push(__magic_name__ ,names_to_config[model_name] ,__magic_name__ ,__magic_name__ )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(__magic_name__ ,__magic_name__ ,__magic_name__ ,__magic_name__ )
    return config, expected_shape
if __name__ == "__main__":
    # NOTE(review): the assignment targets below were mangled to
    # `__lowerCamelCase`, so the later references to `parser`, `args` and
    # `pytorch_dump_folder_path` are unbound as written — restore the real
    # names before running this script.
    __lowerCamelCase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default=None,
        type=str,
        help=(
            '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''',
        default=None,
        type=Path,
        required=True,
        help='''Path to the output PyTorch model directory.''',
    )
    parser.add_argument(
        '''--push_to_hub''',
        default=True,
        type=bool,
        required=False,
        help='''If True, push model and image processor to the hub.''',
    )
    __lowerCamelCase : Tuple = parser.parse_args()
    __lowerCamelCase : Path = args.pytorch_dump_folder_path
    # Make sure the output directory exists before converting.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
'''simple docstring'''
import math
__lowerCamelCase : Optional[Any] = 10
__lowerCamelCase : List[Any] = 7
__lowerCamelCase : Optional[int] = BALLS_PER_COLOUR * NUM_COLOURS
def __UpperCAmelCase ( __magic_name__ = 20 )-> str:
"""simple docstring"""
snake_case_ : Dict = math.comb(__magic_name__ ,__magic_name__ )
snake_case_ : List[str] = math.comb(NUM_BALLS - BALLS_PER_COLOUR ,__magic_name__ )
snake_case_ : str = NUM_COLOURS * (1 - missing_colour / total)
return F'''{result:.9f}'''
if __name__ == "__main__":
print(solution(20))
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration for a RoCBert-style model: standard BERT hyper-parameters
    plus the pronunciation/shape embedding options specific to RoCBert.

    NOTE(review): the original `__init__` declared every parameter under one
    mangled name (a SyntaxError) and bound the values to throw-away locals
    instead of `self`; the parameter names below are reconstructed from the
    attribute reads in the body. `a__` presumably stands for `model_type`.
    """

    a__ = '''roc_bert'''

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-1_2,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Store all hyper-parameters on `self` and forward `pad_token_id` plus
        any extras to the base configuration class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 653 | 1 |
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def __UpperCAmelCase ( __magic_name__ = 100_0000 ,__magic_name__ = 10 )-> int:
"""simple docstring"""
snake_case_ : defaultdict = defaultdict(__magic_name__ )
for outer_width in range(3 ,(t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
snake_case_ : str = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 )
else:
snake_case_ : List[Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(__magic_name__ ,outer_width - 1 ,2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square(__magic_name__ ,__magic_name__ ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
snake_case_ : str = update_area_of_max_square(__magic_name__ ,col + 1 )
snake_case_ : Dict = update_area_of_max_square(row + 1 ,col + 1 )
snake_case_ : int = update_area_of_max_square(row + 1 ,__magic_name__ )
if mat[row][col]:
snake_case_ : str = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
return sub_problem_sol
else:
return 0
snake_case_ : Union[str, Any] = [0]
update_area_of_max_square(0 ,0 )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
__magic_name__ ,__magic_name__ ,__magic_name__ ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
snake_case_ : Dict = update_area_of_max_square_using_dp_array(__magic_name__ ,col + 1 ,__magic_name__ )
snake_case_ : List[Any] = update_area_of_max_square_using_dp_array(row + 1 ,col + 1 ,__magic_name__ )
snake_case_ : Any = update_area_of_max_square_using_dp_array(row + 1 ,__magic_name__ ,__magic_name__ )
if mat[row][col]:
snake_case_ : int = 1 + min([right, diagonal, down] )
snake_case_ : Tuple = max(largest_square_area[0] ,__magic_name__ )
snake_case_ : Optional[Any] = sub_problem_sol
return sub_problem_sol
else:
return 0
snake_case_ : List[Any] = [0]
snake_case_ : Optional[int] = [[-1] * cols for _ in range(__magic_name__ )]
update_area_of_max_square_using_dp_array(0 ,0 ,__magic_name__ )
return largest_square_area[0]
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : Dict = [[0] * (cols + 1) for _ in range(rows + 1 )]
snake_case_ : Dict = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : List[str] = dp_array[row][col + 1]
snake_case_ : Any = dp_array[row + 1][col + 1]
snake_case_ : Any = dp_array[row + 1][col]
if mat[row][col] == 1:
snake_case_ : Any = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : str = max(dp_array[row][col] ,__magic_name__ )
else:
snake_case_ : Optional[Any] = 0
return largest_square_area
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> int:
"""simple docstring"""
snake_case_ : str = [0] * (cols + 1)
snake_case_ : Tuple = [0] * (cols + 1)
snake_case_ : List[str] = 0
for row in range(rows - 1 ,-1 ,-1 ):
for col in range(cols - 1 ,-1 ,-1 ):
snake_case_ : Optional[Any] = current_row[col + 1]
snake_case_ : Optional[int] = next_row[col + 1]
snake_case_ : Dict = next_row[col]
if mat[row][col] == 1:
snake_case_ : Union[str, Any] = 1 + min(__magic_name__ ,__magic_name__ ,__magic_name__ )
snake_case_ : Any = max(current_row[col] ,__magic_name__ )
else:
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = current_row
return largest_square_area
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): every implementation above was left under the same mangled
    # name, so `largest_square_area_in_matrix_bottom_up` is unbound as written
    # — confirm the intended binding before running this demo.
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
__lowerCamelCase : str = re.compile(R'''\s+''')
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(__magic_name__ ,"" ,example["content"] ).encode("utf-8" ) ).hexdigest()}
def __UpperCAmelCase ( __magic_name__ )-> Dict:
"""simple docstring"""
snake_case_ : Optional[Any] = [len(__magic_name__ ) for line in example["content"].splitlines()]
return {"line_mean": np.mean(__magic_name__ ), "line_max": max(__magic_name__ )}
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = np.mean([c.isalnum() for c in example["content"]] )
return {"alpha_frac": alpha_frac}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> Union[str, Any]:
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example["hash"] )
return True
else:
return False
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 )-> List[Any]:
"""simple docstring"""
snake_case_ : Any = ["auto-generated", "autogenerated", "automatically generated"]
snake_case_ : Any = example["content"].splitlines()
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=5 ,__magic_name__=0.05 )-> List[str]:
"""simple docstring"""
snake_case_ : List[Any] = ["unit tests", "test file", "configuration file"]
snake_case_ : Union[str, Any] = example["content"].splitlines()
snake_case_ : Any = 0
snake_case_ : Optional[Any] = 0
# first test
for _, line in zip(range(__magic_name__ ) ,__magic_name__ ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test
snake_case_ : str = example["content"].count("\n" )
snake_case_ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count("config" )
count_test += line.lower().count("test" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = ["def ", "class ", "for ", "while "]
snake_case_ : str = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def __UpperCAmelCase ( __magic_name__ ,__magic_name__=4 )-> Optional[int]:
"""simple docstring"""
snake_case_ : Union[str, Any] = example["content"].splitlines()
snake_case_ : Optional[int] = 0
for line in lines:
counter += line.lower().count("=" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def __UpperCAmelCase ( example )-> dict:
    """Ratio of characters to tokens of `example["content"]`.

    NOTE(review): relies on the module-level `tokenizer`; `truncation=False`
    is assumed to be the intended argument (the mangled original passed the
    example dict itself) — confirm against the upstream preprocessing script.
    """
    input_ids = tokenizer(example["content"] ,truncation=False )["input_ids"]
    ratio = len(example["content"] ) / len(input_ids )
    return {"ratio": ratio}
def __UpperCAmelCase ( example )-> dict:
    """Run every per-example statistic/heuristic above and merge the results
    into one dict.

    Fixes the mangled original, which bound the accumulator to a throw-away
    local while updating an undefined `results`. NOTE(review): the helper
    names below (get_hash, line_stats, ...) were all mangled to one
    identifier in this file — confirm the bindings before running.
    """
    results = {}
    results.update(get_hash(example ) )
    results.update(line_stats(example ) )
    results.update(alpha_stats(example ) )
    results.update(char_token_ratio(example ) )
    results.update(is_autogenerated(example ) )
    results.update(is_config_or_test(example ) )
    results.update(has_no_keywords(example ) )
    results.update(has_few_assignments(example ) )
    return results
def __UpperCAmelCase ( example ,uniques ,args )-> bool:
    """Keep an example only if it is unique and passes every heuristic filter.

    Fixes the mangled original's duplicate parameter names (a SyntaxError).
    NOTE(review): `check_uniques` is the dedup helper defined earlier in this
    file (its name was mangled there) — confirm the binding before running.
    """
    if not check_uniques(example ,uniques ):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def __UpperCAmelCase ( __magic_name__ )-> Optional[int]:
"""simple docstring"""
with open(__magic_name__ ,"rb" ) as f_in:
with gzip.open(str(__magic_name__ ) + ".gz" ,"wb" ,compresslevel=6 ) as f_out:
shutil.copyfileobj(__magic_name__ ,__magic_name__ )
os.unlink(__magic_name__ )
# Settings
# NOTE(review): every assignment target below was mangled to
# `__lowerCamelCase`, so the later references (`parser`, `args`, `tokenizer`,
# `ds`, `uniques`, `frac`, `ds_filter`, `output_dir`, `data_dir`, `t_start`,
# `file_path`, `end_index`, `duplicate_clusters`) are unbound as written —
# restore the real names before running this script.
__lowerCamelCase : List[str] = HfArgumentParser(PreprocessingArguments)
__lowerCamelCase : List[Any] = parser.parse_args()
if args.num_workers is None:
    __lowerCamelCase : List[Any] = multiprocessing.cpu_count()
__lowerCamelCase : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
__lowerCamelCase : Union[str, Any] = time.time()
__lowerCamelCase : int = load_dataset(args.dataset_name, split='''train''')
print(f'''Time to load dataset: {time.time()-t_start:.2f}''')
# Run preprocessing
__lowerCamelCase : int = time.time()
__lowerCamelCase : Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(f'''Time to preprocess dataset: {time.time()-t_start:.2f}''')
# Deduplicate hashes
__lowerCamelCase : Tuple = set(ds.unique('''hash'''))
__lowerCamelCase : Optional[Any] = len(uniques) / len(ds)
print(f'''Fraction of duplicates: {1-frac:.2%}''')
# Deduplicate data and apply heuristics
__lowerCamelCase : str = time.time()
__lowerCamelCase : Any = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(f'''Time to filter dataset: {time.time()-t_start:.2f}''')
print(f'''Size of filtered dataset: {len(ds_filter)}''')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    __lowerCamelCase : List[Any] = time.time()
    __lowerCamelCase , __lowerCamelCase : Union[str, Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f'''Time to deduplicate dataset: {time.time()-t_start:.2f}''')
    print(f'''Size of deduplicate dataset: {len(ds_filter)}''')
# Save data in batches of samples_per_file
__lowerCamelCase : List[Any] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
        json.dump(duplicate_clusters, f)
__lowerCamelCase : str = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
__lowerCamelCase : int = time.time()
# Write the filtered dataset out in fixed-size JSON shards, gzip each shard.
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    __lowerCamelCase : Optional[int] = str(data_dir / f'''file-{file_number+1:012}.json''')
    __lowerCamelCase : Tuple = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f'''Time to save dataset: {time.time()-t_start:.2f}''')
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCAmelCase ( token ,num_runs=7 ):
    """Fetch the most recent `num_runs` scheduled runs of the daily-CI workflow
    on `main` via the GitHub REST API (authenticated when `token` is given).

    Fixes the mangled original, whose two parameters shared one name — a
    SyntaxError — while the body read `token` and `num_runs`.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = F'''https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'''
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += F'''?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'''
    result = requests.get(url ,headers=headers ).json()
    return result["workflow_runs"]
def __UpperCAmelCase ( __magic_name__ )-> Union[str, Any]:
    """Return the id of the newest completed daily-CI workflow run, or None
    when no completed run is found.

    NOTE(review): `get_daily_ci_runs` is the fetch helper defined above (its
    name was mangled there) — confirm the binding before running.
    """
    for candidate_run in get_daily_ci_runs(__magic_name__ ):
        if candidate_run["status"] == "completed":
            return candidate_run["id"]
    return None
def __UpperCAmelCase ( artifact_names ,output_dir ,token ):
    """Download the named artifacts of the latest completed daily-CI run into
    `output_dir`.

    Fixes the mangled original's three identical parameter names (a
    SyntaxError). NOTE(review): `get_last_daily_ci_runs` is the helper above
    (name mangled there); the `worflow_run_id=` keyword spelling is kept
    because it targets the imported `get_artifacts_links` API as-is.
    """
    workflow_run_id = get_last_daily_ci_runs(token )
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id ,token=token )
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name ,artifact_url=artifact_url ,output_dir=output_dir ,token=token )
def __UpperCAmelCase ( artifact_names ,output_dir ,token ):
    """Download the latest daily-CI artifacts and return their text contents
    as ``{artifact_name: {member_filename: text}}``.

    Fixes the mangled original: three identical parameter names (a
    SyntaxError) and the nested result-dict assignments collapsed to
    throw-away locals. NOTE(review): `get_last_daily_ci_artifacts` is the
    helper above (name mangled there) — confirm the binding before running.
    """
    get_last_daily_ci_artifacts(artifact_names ,output_dir ,token )
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir ,F'''{artifact_name}.zip''' )
        if os.path.isfile(artifact_zip_path ):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        with z.open(filename ) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8" )
    return results
| 653 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
__lowerCamelCase : Optional[Any] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
__lowerCamelCase : Dict = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
__lowerCamelCase : str = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ (datasets.Metric ):
    """Word Error Rate (WER) metric backed by `jiwer.compute_measures`.

    Fixes over the mangled original: `_compute` declared three parameters
    under one name (a SyntaxError) while its body read `predictions`,
    `references` and `concatenate_texts`; both methods were named `_A`,
    breaking the `_info`/`_compute` hooks `datasets.Metric` dispatches to.
    """

    def _info( self ):
        """Declare the metric's schema, citation and reference material."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ] , )

    def _compute( self , predictions=None , references=None , concatenate_texts=False ):
        """Return the corpus-level WER of `predictions` against `references`.

        When `concatenate_texts` is True jiwer scores the whole corpus in one
        call; otherwise the error/hit counts are accumulated pair by pair.
        """
        if concatenate_texts:
            return compute_measures(predictions , references )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(prediction , reference )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 653 |
'''simple docstring'''
from string import ascii_uppercase
__lowerCamelCase : Optional[Any] = {char: i for i, char in enumerate(ascii_uppercase)}
__lowerCamelCase : List[str] = dict(enumerate(ascii_uppercase))
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Tuple = len(__magic_name__ )
snake_case_ : str = 0
while True:
if x == i:
snake_case_ : List[str] = 0
if len(__magic_name__ ) == len(__magic_name__ ):
break
key += key[i]
i += 1
return key
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : str = ""
snake_case_ : List[Any] = 0
for letter in message:
if letter == " ":
cipher_text += " "
else:
snake_case_ : Optional[Any] = (dicta[letter] - dicta[key_new[i]]) % 26
i += 1
cipher_text += dicta[x]
return cipher_text
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
snake_case_ : Dict = ""
snake_case_ : Dict = 0
for letter in cipher_text:
if letter == " ":
or_txt += " "
else:
snake_case_ : str = (dicta[letter] + dicta[key_new[i]] + 26) % 26
i += 1
or_txt += dicta[x]
return or_txt
def __UpperCAmelCase ( )-> None:
    """Demo: derive a running key, encrypt a sample message, then decrypt it.

    NOTE(review): `generate_key`, `cipher_text`, `original_text` and the
    locals (`message`, `key`, `key_new`, `s`) were mangled in this file, so
    the calls below are unbound as written — restore the real identifiers
    (the helpers are the three functions above) before running.
    """
    snake_case_ : List[str] = "THE GERMAN ATTACK"
    snake_case_ : List[str] = "SECRET"
    snake_case_ : Optional[int] = generate_key(__magic_name__ ,__magic_name__ )
    snake_case_ : Any = cipher_text(__magic_name__ ,__magic_name__ )
    print(F'''Encrypted Text = {s}''' )
    print(F'''Original Text = {original_text(__magic_name__ ,__magic_name__ )}''' )


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is also a mangled name here — confirm the binding.
    main()
| 653 | 1 |
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__lowerCamelCase : List[str] = logging.get_logger(__name__)
def __UpperCAmelCase ( do_eager_mode ,use_xla ):
    """Decorator factory: wrap a benchmark callable so it runs either eagerly
    or as an (optionally XLA-compiled) `tf.function` graph.

    Raises ValueError when eager mode and XLA are requested together.
    Fixes the mangled original, whose two parameters shared one name — a
    SyntaxError.
    """
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args ,**kwargs ):
            return func(*args ,**kwargs )

        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args ,**kwargs ):
            return func(*args ,**kwargs )

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


# Public alias matching the decorator name used inside the benchmark class.
run_with_tf_optimizations = __UpperCAmelCase
def __UpperCAmelCase ( batch_size ,sequence_length ,vocab_size ):
    """Build a random `tf.Tensor` of token ids with shape
    (batch_size, sequence_length) and values in [0, vocab_size).

    Fixes over the mangled original: three identical parameter names (a
    SyntaxError), the generated value list being dropped from `tf.constant`,
    and the non-existent `tf.intaa` dtype (int32 intended).
    """
    rng = random.Random()
    values = [rng.randint(0 ,vocab_size - 1 ) for i in range(batch_size * sequence_length )]
    return tf.constant(values ,shape=(batch_size, sequence_length) ,dtype=tf.int32 )


# Public alias matching the helper name used inside the benchmark class.
random_input_ids = __UpperCAmelCase
class A_ (a_ ):
"""simple docstring"""
a__ = 42
a__ = 42
a__ = "TensorFlow"
    @property
    def _A ( self :Optional[int] ) -> str:
        """Version string of the installed TensorFlow package.

        NOTE(review): the benchmark base class presumably expects this
        property under the name `framework_version` — the name here was
        mangled; confirm before relying on base-class dispatch.
        """
        return tf.__version__
def _A ( self :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> float:
'''simple docstring'''
snake_case_ : Tuple = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case_ : int = self._prepare_inference_func(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self._measure_speed(_inference )
def _A ( self :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> float:
'''simple docstring'''
snake_case_ : int = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case_ : Optional[Any] = self._prepare_train_func(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self._measure_speed(_train )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> [Memory, Optional[MemorySummary]]:
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase__ )
snake_case_ : Tuple = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case_ : List[str] = self._prepare_inference_func(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self._measure_memory(_inference )
def _A ( self :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> [Memory, Optional[MemorySummary]]:
'''simple docstring'''
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , lowerCAmelCase__ )
snake_case_ : Tuple = self.args.strategy
if strategy is None:
raise ValueError("A device strategy has to be initialized before using TensorFlow." )
snake_case_ : Dict = self._prepare_train_func(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return self._measure_memory(_train )
    def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Callable[[], None]:
        """Build and return a zero-argument inference closure for the model.

        Picks the concrete TF model class via `config.architectures` when
        available (falling back to `TF_MODEL_MAPPING`), builds random input
        ids, and wraps a forward pass with `run_with_tf_optimizations`.

        NOTE(review): duplicate ``lowerCAmelCase__`` parameters (SyntaxError)
        and ``snake_case_`` placeholder assignments shadow the real locals
        (``config``, ``model_class``, ``model_cls``, ``model``, ``vocab_size``,
        ``input_ids`` are read but never assigned) — restore before running.
        """
        snake_case_ : List[str] = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )
        snake_case_ : List[Any] = (
            hasattr(lowerCAmelCase__ , "architectures" )
            and isinstance(config.architectures , lowerCAmelCase__ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                snake_case_ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
                snake_case_ : Dict = __import__("transformers" , fromlist=[model_class] )
                snake_case_ : Any = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
                snake_case_ : Union[str, Any] = model_cls(lowerCAmelCase__ )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            snake_case_ : Dict = TF_MODEL_MAPPING[config.__class__](lowerCAmelCase__ )
        # encoder-decoder has vocab size saved differently
        snake_case_ : List[str] = config.vocab_size if hasattr(lowerCAmelCase__ , "vocab_size" ) else config.encoder.vocab_size
        snake_case_ : List[str] = random_input_ids(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            # Seq2seq models also need decoder inputs for a forward pass.
            return model(lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , training=lowerCAmelCase__ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(lowerCAmelCase__ , training=lowerCAmelCase__ )

        snake_case_ : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _A ( self :int , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :int ) -> Callable[[], None]:
        """Build and return a zero-argument training-step closure (forward pass
        with LM labels + gradient computation); requires graph mode.

        NOTE(review): same degradation as the inference variant above —
        duplicate ``lowerCAmelCase__`` parameters (SyntaxError) and
        ``snake_case_`` placeholder assignments shadowing the real locals.
        """
        snake_case_ : Dict = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )
        snake_case_ : Optional[Any] = (
            hasattr(lowerCAmelCase__ , "architectures" )
            and isinstance(config.architectures , lowerCAmelCase__ )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                snake_case_ : int = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
                snake_case_ : Union[str, Any] = __import__("transformers" , fromlist=[model_class] )
                snake_case_ : Optional[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
                snake_case_ : Optional[int] = model_cls(lowerCAmelCase__ )
            except ImportError:
                raise ImportError(
                    F'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            # Training needs a head, hence the LM-head mapping here.
            snake_case_ : int = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](lowerCAmelCase__ )
        # encoder-decoder has vocab size saved differently
        snake_case_ : Union[str, Any] = config.vocab_size if hasattr(lowerCAmelCase__ , "vocab_size" ) else config.encoder.vocab_size
        snake_case_ : Dict = random_input_ids(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            # LM loss, then gradients w.r.t. all trainable variables.
            snake_case_ : List[Any] = model(lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )[0]
            snake_case_ : Optional[Any] = tf.gradients(lowerCAmelCase__ , model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            snake_case_ : str = model(lowerCAmelCase__ , labels=lowerCAmelCase__ , training=lowerCAmelCase__ )[0]
            snake_case_ : int = tf.gradients(lowerCAmelCase__ , model.trainable_variables )
            return gradients

        snake_case_ : Dict = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _A ( self :List[Any] , lowerCAmelCase__ :Tuple ) -> float:
        """Time the given zero-argument callable and return the minimum runtime
        per single call (seconds); implicitly returns None when the model does
        not fit on the GPU (ResourceExhaustedError path).

        NOTE(review): the timing list is bound to the placeholder
        ``snake_case_`` but read via ``min(...)`` of the parameter name — the
        local was presumably ``runtimes`` upstream.
        """
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(lowerCAmelCase__ , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                snake_case_ : List[str] = timeit.repeat(
                    lowerCAmelCase__ , repeat=self.args.repeat , number=10 , )
                # Divide by `number=10` to report per-call time.
                return min(lowerCAmelCase__ ) / 1_0.0
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
    def _A ( self :str , lowerCAmelCase__ :Callable[[], None] ) -> [Memory, MemorySummary]:
        """Run the callable once and report its peak memory: via py3nvml on
        GPU, `measure_peak_memory_cpu` on CPU, or line-by-line tracing when
        `args.trace_memory_line_by_line` is set (eager mode only).

        Returns ("N/A", None) when the model does not fit on the GPU.

        NOTE(review): ``snake_case_`` placeholder assignments shadow the real
        locals (``trace``, ``memory``, ``handle``, ``meminfo``, ``summary`` …)
        throughout — restore the original names before running.
        """
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used." )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line." )
                    snake_case_ : Union[str, Any] = start_memory_tracing("transformers" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
                        snake_case_ : Dict = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU." )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        snake_case_ : str = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        snake_case_ : int = nvml.nvmlDeviceGetMemoryInfo(lowerCAmelCase__ )
                        snake_case_ : int = meminfo.used
                        snake_case_ : str = Memory(lowerCAmelCase__ )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow." )
                        snake_case_ : Dict = None
                    else:
                        snake_case_ : int = measure_peak_memory_cpu(lowerCAmelCase__ )
                        snake_case_ : int = Memory(lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    snake_case_ : Union[str, Any] = stop_memory_tracing(lowerCAmelCase__ )
                    if memory is None:
                        snake_case_ : int = summary.total
                else:
                    snake_case_ : str = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
| 653 |
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ ,__magic_name__ )-> Dict:
    """Export a PyTorch `BertModel`'s weights as a TensorFlow-1 checkpoint
    (`<model_name>.ckpt` inside the target directory), renaming parameters to
    the original TF BERT layout and transposing the kernels that TF stores in
    the opposite orientation.

    NOTE(review): the three parameters share the name ``__magic_name__``
    (duplicate-argument SyntaxError) — presumably ``model, ckpt_dir,
    model_name`` upstream — and locals are bound to the ``snake_case_``
    placeholder while later lines read ``state_dict``, ``tf_name``,
    ``torch_tensor``, ``tf_var``, ``tf_weight``, ``saver``.
    """
    snake_case_ : Tuple = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    snake_case_ : Union[str, Any] = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(__magic_name__ ):
        os.makedirs(__magic_name__ )
    snake_case_ : str = model.state_dict()

    def to_tf_var_name(__magic_name__ ):
        # Apply the rename table in order, then prefix with the `bert/` scope.
        for patt, repl in iter(__magic_name__ ):
            snake_case_ : List[str] = name.replace(__magic_name__ ,__magic_name__ )
        return F'''bert/{name}'''

    def create_tf_var(__magic_name__ ,__magic_name__ ,__magic_name__ ):
        # Create a zero-initialized TF1 variable matching the torch tensor.
        snake_case_ : List[Any] = tf.dtypes.as_dtype(tensor.dtype )
        snake_case_ : Union[str, Any] = tf.get_variable(dtype=__magic_name__ ,shape=tensor.shape ,name=__magic_name__ ,initializer=tf.zeros_initializer() )
        session.run(tf.variables_initializer([tf_var] ) )
        session.run(__magic_name__ )
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            snake_case_ : Optional[int] = to_tf_var_name(__magic_name__ )
            snake_case_ : Dict = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose ):
                # TF kernels are stored transposed relative to torch weights.
                snake_case_ : List[Any] = torch_tensor.T
            snake_case_ : Union[str, Any] = create_tf_var(tensor=__magic_name__ ,name=__magic_name__ ,session=__magic_name__ )
            tf.keras.backend.set_value(__magic_name__ ,__magic_name__ )
            snake_case_ : List[str] = session.run(__magic_name__ )
            print(F'''Successfully created {tf_name}: {np.allclose(__magic_name__ ,__magic_name__ )}''' )
        snake_case_ : Any = tf.train.Saver(tf.trainable_variables() )
        saver.save(__magic_name__ ,os.path.join(__magic_name__ ,model_name.replace("-" ,"_" ) + ".ckpt" ) )
def __UpperCAmelCase ( __magic_name__=None )-> Optional[Any]:
    """CLI entry point: parse arguments, load the PyTorch `BertModel`, and hand
    it to the checkpoint converter.

    NOTE(review): parser/args/model results are bound to the placeholder
    ``snake_case_`` but read as ``parser``/``args``; most argparse keyword
    values were replaced by ``__magic_name__`` (presumably ``type=str``,
    ``required=True``/``False`` upstream); and the call target
    ``convert_pytorch_checkpoint_to_tf`` is not defined in this module — the
    converter above was renamed. Restore against the upstream script.
    """
    snake_case_ : Any = argparse.ArgumentParser()
    parser.add_argument("--model_name" ,type=__magic_name__ ,required=__magic_name__ ,help="model name e.g. bert-base-uncased" )
    parser.add_argument(
        "--cache_dir" ,type=__magic_name__ ,default=__magic_name__ ,required=__magic_name__ ,help="Directory containing pytorch model" )
    parser.add_argument("--pytorch_model_path" ,type=__magic_name__ ,required=__magic_name__ ,help="/path/to/<pytorch-model-name>.bin" )
    parser.add_argument("--tf_cache_dir" ,type=__magic_name__ ,required=__magic_name__ ,help="Directory in which to save tensorflow model" )
    snake_case_ : Optional[int] = parser.parse_args(__magic_name__ )
    snake_case_ : Optional[int] = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name ,state_dict=torch.load(args.pytorch_model_path ) ,cache_dir=args.cache_dir ,)
    convert_pytorch_checkpoint_to_tf(model=__magic_name__ ,ckpt_dir=args.tf_cache_dir ,model_name=args.model_name )
if __name__ == "__main__":
    # Bug fix: the guard called `main()`, but no `main` is defined in this
    # module, so running the script raised NameError. The argument-parsing
    # entry point is the (last-defined) `__UpperCAmelCase` function.
    __UpperCAmelCase()
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ = 1000 )-> int:
"""simple docstring"""
snake_case_, snake_case_ : Tuple = 1, 1
snake_case_ : List[str] = []
for i in range(1 ,n + 1 ):
snake_case_ : Tuple = prev_numerator + 2 * prev_denominator
snake_case_ : Optional[int] = prev_numerator + prev_denominator
if len(str(__magic_name__ ) ) > len(str(__magic_name__ ) ):
result.append(__magic_name__ )
snake_case_ : Tuple = numerator
snake_case_ : str = denominator
return len(__magic_name__ )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 653 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class A_ (a_ ):
    """Hash table variant whose buckets are `collections.deque`s, so colliding
    keys chain inside a single slot (most recent value first).

    NOTE(review): the base-class name ``a_`` is undefined here; given the
    ``from .hash_table import HashTable`` import above it should presumably be
    ``HashTable``. All three methods are named ``_A`` (each shadows the
    previous) and assignments target the placeholder ``snake_case_`` — the
    upstream names are ``_set_value``, ``balanced_factor`` and
    ``_collision_resolution``; restore before use.
    """

    def __init__( self :List[str] , *lowerCAmelCase__ :Optional[Any] , **lowerCAmelCase__ :Dict ) -> Union[str, Any]:
        # NOTE(review): `*lowerCAmelCase__, **lowerCAmelCase__` duplicates the
        # parameter name — a SyntaxError; should presumably be `*args, **kwargs`.
        super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )

    def _A ( self :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] ) -> Union[str, Any]:
        """Store a value: lazily create the slot's deque, then push-left."""
        snake_case_ : Optional[int] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(lowerCAmelCase__ )
        snake_case_ : Tuple = self.values[key]

    def _A ( self :int ) -> Dict:
        """Average spare capacity per slot, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(lowerCAmelCase__ ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _A ( self :str , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple=None ) -> Any:
        """Keep chaining into this slot until it is full and no slot is free;
        only then defer to the base class's collision resolution."""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(lowerCAmelCase__ ) == 0
        ):
            return key
        return super()._collision_resolution(lowerCAmelCase__ , lowerCAmelCase__ )
| 653 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class A_ (tf.keras.layers.Layer ):
    """TF adaptive softmax with clustered projections (TransformerXL-style):
    the most frequent tokens live in a fast "head" softmax while rarer tokens
    sit in smaller tail clusters whose embeddings may be down-projected by
    powers of `div_val`.

    NOTE(review): this block carries the file-wide degradation — duplicate
    ``lowerCAmelCase__`` parameter names (a SyntaxError) and assignments bound
    to the placeholder ``snake_case_`` while later lines read the intended
    names (``self.vocab_size``, ``weight``, ``bias``, ``head_logprob`` …);
    the original identifiers must be restored before this can run.
    """

    def __init__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :int=1 , lowerCAmelCase__ :Any=False , **lowerCAmelCase__ :str ) -> Dict:
        """Record sizing hyper-parameters; weights are created in build()."""
        super().__init__(**lowerCAmelCase__ )
        snake_case_ : Optional[int] = vocab_size
        snake_case_ : Tuple = d_embed
        snake_case_ : Tuple = d_proj
        # Final cutoff is the vocab size itself, so cutoff_ends brackets every cluster.
        snake_case_ : Tuple = cutoffs + [vocab_size]
        snake_case_ : Optional[int] = [0] + self.cutoffs
        snake_case_ : List[str] = div_val
        snake_case_ : Tuple = self.cutoffs[0]
        snake_case_ : Union[str, Any] = len(self.cutoffs ) - 1
        snake_case_ : List[Any] = self.shortlist_size + self.n_clusters
        snake_case_ : Optional[Any] = keep_order
        snake_case_ : int = []
        snake_case_ : Dict = []

    def _A ( self :str , lowerCAmelCase__ :Optional[int] ) -> str:
        """Create cluster weights plus per-cutoff projection/output variables,
        then delegate to `Layer.build()`."""
        if self.n_clusters > 0:
            snake_case_ : Optional[int] = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase__ , name="cluster_weight" )
            snake_case_ : Dict = self.add_weight(
                shape=(self.n_clusters,) , initializer="zeros" , trainable=lowerCAmelCase__ , name="cluster_bias" )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    # Only project when hidden size differs from embedding size.
                    snake_case_ : List[str] = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase__ , name=F'''out_projs_._{i}''' , )
                    self.out_projs.append(lowerCAmelCase__ )
                else:
                    self.out_projs.append(lowerCAmelCase__ )
                snake_case_ : Any = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase__ , name=F'''out_layers_._{i}_._weight''' , )
                snake_case_ : List[str] = self.add_weight(
                    shape=(self.vocab_size,) , initializer="zeros" , trainable=lowerCAmelCase__ , name=F'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                snake_case_, snake_case_ : List[str] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                # Tail clusters use progressively smaller embeddings: d_embed / div_val**i.
                snake_case_ : Optional[int] = self.d_embed // (self.div_val**i)
                snake_case_ : int = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase__ , name=F'''out_projs_._{i}''' )
                self.out_projs.append(lowerCAmelCase__ )
                snake_case_ : List[str] = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=lowerCAmelCase__ , name=F'''out_layers_._{i}_._weight''' , )
                snake_case_ : List[str] = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer="zeros" , trainable=lowerCAmelCase__ , name=F'''out_layers_._{i}_._bias''' , )
                self.out_layers.append((weight, bias) )
        super().build(lowerCAmelCase__ )

    @staticmethod
    def _A ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=None ) -> str:
        """Compute logits `x @ W^T + b`, optionally projecting `x` first."""
        snake_case_ : Tuple = x
        if proj is not None:
            snake_case_ : List[str] = tf.einsum("ibd,ed->ibe" , lowerCAmelCase__ , lowerCAmelCase__ )
        return tf.einsum("ibd,nd->ibn" , lowerCAmelCase__ , lowerCAmelCase__ ) + b

    @staticmethod
    def _A ( lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str ) -> Dict:
        """Gather each position's log-probability at its target index."""
        snake_case_ : Optional[int] = shape_list(lowerCAmelCase__ )
        snake_case_ : Dict = tf.range(lp_size[0] , dtype=target.dtype )
        snake_case_ : List[Any] = tf.stack([r, target] , 1 )
        return tf.gather_nd(lowerCAmelCase__ , lowerCAmelCase__ )

    def _A ( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Any=False ) -> Tuple:
        """Forward pass: log-softmax over the full vocabulary and, when a
        target is provided, the negative log-likelihood attached via
        `add_loss` / `add_metric`."""
        snake_case_ : Optional[Any] = 0
        if self.n_clusters == 0:
            # Plain (non-adaptive) softmax path.
            snake_case_ : List[Any] = self._logit(lowerCAmelCase__ , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                snake_case_ : Optional[Any] = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase__ , logits=lowerCAmelCase__ )
            snake_case_ : List[str] = tf.nn.log_softmax(lowerCAmelCase__ , axis=-1 )
        else:
            snake_case_ : Optional[int] = shape_list(lowerCAmelCase__ )
            snake_case_ : int = []
            snake_case_ : Dict = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                snake_case_, snake_case_ : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    # Select the positions whose target falls in this cluster.
                    snake_case_ : str = (target >= l_idx) & (target < r_idx)
                    snake_case_ : Optional[Any] = tf.where(lowerCAmelCase__ )
                    snake_case_ : Tuple = tf.boolean_mask(lowerCAmelCase__ , lowerCAmelCase__ ) - l_idx
                if self.div_val == 1:
                    snake_case_ : Optional[int] = self.out_layers[0][0][l_idx:r_idx]
                    snake_case_ : Any = self.out_layers[0][1][l_idx:r_idx]
                else:
                    snake_case_ : str = self.out_layers[i][0]
                    snake_case_ : List[str] = self.out_layers[i][1]
                if i == 0:
                    # Head cluster: concatenate cluster logits so tails chain off it.
                    snake_case_ : str = tf.concat([cur_W, self.cluster_weight] , 0 )
                    snake_case_ : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
                    snake_case_ : List[str] = self._logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , self.out_projs[0] )
                    snake_case_ : Tuple = tf.nn.log_softmax(lowerCAmelCase__ )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        snake_case_ : Optional[Any] = tf.boolean_mask(lowerCAmelCase__ , lowerCAmelCase__ )
                        snake_case_ : List[Any] = self._gather_logprob(lowerCAmelCase__ , lowerCAmelCase__ )
                else:
                    snake_case_ : Any = self._logit(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , self.out_projs[i] )
                    snake_case_ : Optional[Any] = tf.nn.log_softmax(lowerCAmelCase__ )
                    snake_case_ : Any = self.cutoffs[0] + i - 1 # No probability for the head cluster
                    # Tail log-prob = cluster log-prob + within-cluster log-prob.
                    snake_case_ : int = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(lowerCAmelCase__ )
                    if target is not None:
                        snake_case_ : Optional[Any] = tf.boolean_mask(lowerCAmelCase__ , lowerCAmelCase__ )
                        snake_case_ : Union[str, Any] = tf.boolean_mask(lowerCAmelCase__ , lowerCAmelCase__ )
                        snake_case_ : Tuple = self._gather_logprob(lowerCAmelCase__ , lowerCAmelCase__ )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(lowerCAmelCase__ , -cur_logprob , shape_list(lowerCAmelCase__ ) )
            snake_case_ : Optional[Any] = tf.concat(lowerCAmelCase__ , axis=-1 )
        if target is not None:
            if return_mean:
                snake_case_ : Any = tf.reduce_mean(lowerCAmelCase__ )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(lowerCAmelCase__ )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(lowerCAmelCase__ , name=self.name , aggregation="mean" if return_mean else "" )
        return out
| 653 |
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
__lowerCamelCase : Dict = TypeVar('''KEY''')
__lowerCamelCase : int = TypeVar('''VAL''')
@dataclass(frozen=a_ , slots=a_ )
class A_ (Generic[KEY, VAL] ):
"""simple docstring"""
a__ = 42
a__ = 42
class A_ (_Item ):
"""simple docstring"""
def __init__( self :List[Any] ) -> None:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
def __bool__( self :Optional[int] ) -> bool:
'''simple docstring'''
return False
__lowerCamelCase : Dict = _DeletedItem()
class A_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self :Dict , lowerCAmelCase__ :int = 8 , lowerCAmelCase__ :float = 0.7_5 ) -> None:
'''simple docstring'''
snake_case_ : Any = initial_block_size
snake_case_ : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
snake_case_ : Tuple = capacity_factor
snake_case_ : List[Any] = 0
def _A ( self :Tuple , lowerCAmelCase__ :KEY ) -> int:
'''simple docstring'''
return hash(lowerCAmelCase__ ) % len(self._buckets )
def _A ( self :Any , lowerCAmelCase__ :int ) -> int:
'''simple docstring'''
return (ind + 1) % len(self._buckets )
def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> bool:
'''simple docstring'''
snake_case_ : Optional[int] = self._buckets[ind]
if not stored:
snake_case_ : int = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
self._len += 1
return True
elif stored.key == key:
snake_case_ : Optional[int] = _Item(lowerCAmelCase__ , lowerCAmelCase__ )
return True
else:
return False
def _A ( self :int ) -> bool:
'''simple docstring'''
snake_case_ : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowerCAmelCase__ )
def _A ( self :Any ) -> bool:
'''simple docstring'''
if len(self._buckets ) <= self._initial_block_size:
return False
snake_case_ : Optional[int] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def _A ( self :Tuple , lowerCAmelCase__ :int ) -> None:
'''simple docstring'''
snake_case_ : Tuple = self._buckets
snake_case_ : int = [None] * new_size
snake_case_ : Any = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def _A ( self :Optional[int] ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) * 2 )
def _A ( self :str ) -> None:
'''simple docstring'''
self._resize(len(self._buckets ) // 2 )
def _A ( self :Optional[int] , lowerCAmelCase__ :KEY ) -> Iterator[int]:
'''simple docstring'''
snake_case_ : str = self._get_bucket_index(lowerCAmelCase__ )
for _ in range(len(self._buckets ) ):
yield ind
snake_case_ : List[Any] = self._get_next_ind(lowerCAmelCase__ )
def _A ( self :Union[str, Any] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
if self._try_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
break
def __setitem__( self :Optional[int] , lowerCAmelCase__ :KEY , lowerCAmelCase__ :VAL ) -> None:
'''simple docstring'''
if self._is_full():
self._size_up()
self._add_item(lowerCAmelCase__ , lowerCAmelCase__ )
def __delitem__( self :List[Any] , lowerCAmelCase__ :KEY ) -> None:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : int = self._buckets[ind]
if item is None:
raise KeyError(lowerCAmelCase__ )
if item is _deleted:
continue
if item.key == key:
snake_case_ : List[str] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self :List[str] , lowerCAmelCase__ :KEY ) -> VAL:
'''simple docstring'''
for ind in self._iterate_buckets(lowerCAmelCase__ ):
snake_case_ : Optional[Any] = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowerCAmelCase__ )
def __len__( self :Optional[Any] ) -> int:
'''simple docstring'''
return self._len
def __iter__( self :List[Any] ) -> Iterator[KEY]:
'''simple docstring'''
yield from (item.key for item in self._buckets if item)
def __repr__( self :Any ) -> str:
'''simple docstring'''
snake_case_ : Dict = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
| 653 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class A_ (a_ , a_ , unittest.TestCase ):
    """Fast (CPU, tiny-model) tests for `StableDiffusionPanoramaPipeline`:
    output shape/slice checks, batch consistency, negative prompts, view
    batching, and alternative schedulers.

    NOTE(review): the mixin bases ``a_ , a_`` are undefined placeholders —
    from the imports above they are presumably `PipelineLatentTesterMixin` and
    `PipelineTesterMixin`. Throughout the methods, results are bound to the
    placeholder ``snake_case_`` while following lines read the intended names
    (``unet``, ``sd_pipe``, ``image`` …); restore before running.
    """

    a__ = StableDiffusionPanoramaPipeline
    a__ = TEXT_TO_IMAGE_PARAMS
    a__ = TEXT_TO_IMAGE_BATCH_PARAMS
    a__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    a__ = TEXT_TO_IMAGE_IMAGE_PARAMS

    def _A ( self :List[str] ) -> List[Any]:
        """Build the tiny unet/scheduler/vae/text-encoder component dict."""
        torch.manual_seed(0 )
        snake_case_ : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        snake_case_ : Optional[Any] = DDIMScheduler()
        torch.manual_seed(0 )
        snake_case_ : int = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        torch.manual_seed(0 )
        snake_case_ : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        snake_case_ : Tuple = CLIPTextModel(lowerCAmelCase__ )
        snake_case_ : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        snake_case_ : Optional[Any] = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def _A ( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any]=0 ) -> Optional[int]:
        """Standard pipeline kwargs with a deterministically seeded generator."""
        snake_case_ : Optional[Any] = torch.manual_seed(lowerCAmelCase__ )
        snake_case_ : Dict = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def _A ( self :Optional[int] ) -> str:
        """Default DDIM run: check output shape and a reference slice."""
        snake_case_ : str = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Tuple = self.get_dummy_components()
        snake_case_ : Dict = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
        snake_case_ : List[Any] = sd_pipe.to(lowerCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
        snake_case_ : int = sd_pipe(**lowerCAmelCase__ ).images
        snake_case_ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : int = np.array([0.6_1_8_6, 0.5_3_7_4, 0.4_9_1_5, 0.4_1_3_5, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_7, 0.4_7_5_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self :Any ) -> Optional[Any]:
        """Batch-consistency check from the shared pipeline test mixin."""
        super().test_inference_batch_consistent(batch_sizes=[1, 2] )

    def _A ( self :List[str] ) -> Union[str, Any]:
        """Single-vs-batched equivalence check from the shared mixin."""
        super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )

    def _A ( self :List[str] ) -> List[str]:
        """Run with a negative prompt and compare against a reference slice."""
        snake_case_ : Dict = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : int = self.get_dummy_components()
        snake_case_ : Tuple = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
        snake_case_ : List[str] = sd_pipe.to(lowerCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
        snake_case_ : Optional[int] = "french fries"
        snake_case_ : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ )
        snake_case_ : List[str] = output.images
        snake_case_ : Optional[Any] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : str = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self :List[str] ) -> List[Any]:
        """Run with view_batch_size=2 and compare against a reference slice."""
        snake_case_ : List[Any] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : List[Any] = self.get_dummy_components()
        snake_case_ : List[Any] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
        snake_case_ : int = sd_pipe.to(lowerCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : str = self.get_dummy_inputs(lowerCAmelCase__ )
        snake_case_ : Optional[int] = sd_pipe(**lowerCAmelCase__ , view_batch_size=2 )
        snake_case_ : Optional[Any] = output.images
        snake_case_ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : Tuple = np.array([0.6_1_8_7, 0.5_3_7_5, 0.4_9_1_5, 0.4_1_3_6, 0.4_1_1_4, 0.4_5_6_3, 0.5_1_2_8, 0.4_9_7_6, 0.4_7_5_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self :Tuple ) -> List[str]:
        """Swap in Euler-ancestral scheduling and check the reference slice."""
        snake_case_ : Optional[int] = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : List[str] = self.get_dummy_components()
        snake_case_ : Any = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
        snake_case_ : Optional[int] = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
        snake_case_ : Optional[int] = sd_pipe.to(lowerCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase__ )
        snake_case_ : Union[str, Any] = sd_pipe(**lowerCAmelCase__ ).images
        snake_case_ : List[str] = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : int = np.array([0.4_0_2_4, 0.6_5_1_0, 0.4_9_0_1, 0.5_3_7_8, 0.5_8_1_3, 0.5_6_2_2, 0.4_7_9_5, 0.4_4_6_7, 0.4_9_5_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2

    def _A ( self :Tuple ) -> List[str]:
        """Swap in PNDM scheduling and check the reference slice."""
        snake_case_ : Dict = "cpu"  # ensure determinism for the device-dependent torch.Generator
        snake_case_ : Optional[int] = self.get_dummy_components()
        snake_case_ : Tuple = PNDMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , skip_prk_steps=lowerCAmelCase__ )
        snake_case_ : str = StableDiffusionPanoramaPipeline(**lowerCAmelCase__ )
        snake_case_ : Optional[Any] = sd_pipe.to(lowerCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        snake_case_ : Any = self.get_dummy_inputs(lowerCAmelCase__ )
        snake_case_ : Optional[int] = sd_pipe(**lowerCAmelCase__ ).images
        snake_case_ : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case_ : str = np.array([0.6_3_9_1, 0.6_2_9_1, 0.4_8_6_1, 0.5_1_3_4, 0.5_5_5_2, 0.4_5_7_8, 0.5_0_3_2, 0.5_0_2_3, 0.4_5_3_9] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
"""simple docstring"""
    def _A ( self :List[str] ) -> Tuple:
        """Free GPU memory between slow tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _A ( self :Tuple , lowerCAmelCase__ :str=0 ) -> List[Any]:
        """Standard full-model panorama kwargs with a seeded generator.

        NOTE(review): the generator and dict are bound to the placeholder
        ``snake_case_`` while the dict reads ``generator`` — original local
        names need restoring before this can run.
        """
        snake_case_ : Optional[int] = torch.manual_seed(lowerCAmelCase__ )
        snake_case_ : Dict = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def _A ( self :Tuple ) -> str:
        """Full SD-2-base DDIM panorama run: check the 512x2048 output slice."""
        snake_case_ : Union[str, Any] = "stabilityai/stable-diffusion-2-base"
        snake_case_ : List[Any] = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler" )
        snake_case_ : Dict = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : Tuple = self.get_inputs()
        snake_case_ : Tuple = pipe(**lowerCAmelCase__ ).images
        snake_case_ : Any = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        snake_case_ : Any = np.array(
            [
                0.3_6_9_6_8_3_9_2,
                0.2_7_0_2_5_3_7_2,
                0.3_2_4_4_6_7_6_6,
                0.2_8_3_7_9_3_8_7,
                0.3_6_3_6_3_2_7_4,
                0.3_0_7_3_3_3_4_7,
                0.2_7_1_0_0_0_2_7,
                0.2_7_0_5_4_1_2_5,
                0.2_5_5_3_6_0_9_6,
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-2

    def _A ( self :List[Any] ) -> Tuple:
        """Same run with an LMS scheduler; the expected slice is all zeros."""
        snake_case_ : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base" , safety_checker=lowerCAmelCase__ )
        snake_case_ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : str = self.get_inputs()
        snake_case_ : Any = pipe(**lowerCAmelCase__ ).images
        snake_case_ : int = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2_048, 3)
        snake_case_ : Tuple = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def _A ( self :Optional[Any] ) -> str:
        """Verify the step callback fires once per inference step and sees
        latents with the expected shape/values at steps 1 and 2."""
        snake_case_ : Optional[int] = 0

        def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
            snake_case_ : Dict = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                snake_case_ : Optional[int] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                snake_case_ : Optional[Any] = latents[0, -3:, -3:, -1]
                snake_case_ : int = np.array(
                    [
                        0.1_8_6_8_1_8_6_9,
                        0.3_3_9_0_7_8_1_6,
                        0.5_3_6_1_2_7_6,
                        0.1_4_4_3_2_8_6_5,
                        -0.0_2_8_5_6_6_1_1,
                        -0.7_3_9_4_1_1_2_3,
                        0.2_3_3_9_7_9_8_7,
                        0.4_7_3_2_2_6_8_2,
                        -0.3_7_8_2_3_1_6_4,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                snake_case_ : int = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                snake_case_ : Tuple = latents[0, -3:, -3:, -1]
                snake_case_ : Dict = np.array(
                    [
                        0.1_8_5_3_9_6_4_5,
                        0.3_3_9_8_7_2_4_8,
                        0.5_3_7_8_5_5_9,
                        0.1_4_4_3_7_1_4_2,
                        -0.0_2_4_5_5_2_6_1,
                        -0.7_3_3_8_3_1_7,
                        0.2_3_9_9_0_7_5_5,
                        0.4_7_3_5_6_2_7_2,
                        -0.3_7_8_6_5_0_5,
                    ] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2

        snake_case_ : Tuple = False
        snake_case_ : str = "stabilityai/stable-diffusion-2-base"
        snake_case_ : Any = DDIMScheduler.from_pretrained(lowerCAmelCase__ , subfolder="scheduler" )
        snake_case_ : int = StableDiffusionPanoramaPipeline.from_pretrained(lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ )
        snake_case_ : Tuple = pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()
        snake_case_ : int = self.get_inputs()
        pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3
def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):  # was mangled to `_A`
    """Integration test: with attention slicing and sequential CPU offload the
    pipeline stays under the GPU-memory budget.

    Restores the `mem_bytes` local lost to mangling and fixes the stale
    "5.2 GB" comment to match the asserted 5.5 GB bound.
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()

    model_ckpt = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
    pipe = pipe.to(torch_device)  # NOTE(review): assumes `torch_device` is imported at file top — confirm
    pipe.set_progress_bar_config(disable=None)
    pipe.enable_attention_slicing(1)
    pipe.enable_sequential_cpu_offload()

    inputs = self.get_inputs()
    _ = pipe(**inputs)

    mem_bytes = torch.cuda.max_memory_allocated()
    # make sure that less than 5.5 GB is allocated
    assert mem_bytes < 5.5 * 10**9
# ---- stray table fragment removed (non-code artifact) ----
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
logger = logging.get_logger(__name__)

# Map from model id to the hosted config file; the mangled original bound both
# constants to the same throwaway name, so the logger was immediately clobbered.
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class A_ (PretrainedConfig):
    """Configuration for GPT-BigCode models (a GPT-2 variant with multi-query attention).

    The mangled original was unrunnable: the base class `a_` was undefined
    (`PretrainedConfig` is imported above), all `__init__` parameters shared one
    duplicated name (a SyntaxError), and no attribute was ever set on `self`.
    This restores the canonical config behavior; `attention_softmax_in_fp32` /
    `scale_attention_softmax_in_fp32` were garbled to `..._in_fpaa`.
    """

    # Identifiers PretrainedConfig machinery relies on.
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50_257,
        n_positions=1_024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50_256,
        eos_token_id=50_256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        # Let the base class store token ids and any remaining kwargs.
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
# ---- stray table fragment removed (non-code artifact) ----
'''simple docstring'''
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
# Find the fork point with main, list modified .py files under the requested
# top-level dirs, and print them space-separated with no trailing newline
# (the output is consumed by Makefile commands). The mangled original bound
# every value to the same throwaway name while the later lines referenced the
# real names (`fork_point_sha`, `modified_files`, ...), raising NameError.
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
# ---- stray table fragment removed (non-code artifact) ----
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
# Configure root logging once for this module; include the PID so interleaved
# multi-process (distributed training) logs can be told apart.
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO,
)
# The functions below log through `logger`; the mangled original bound the
# logger to a throwaway name, leaving `logger` undefined.
logger = logging.getLogger(__name__)
def git_log(folder_path):  # was mangled to `__UpperCAmelCase`, shadowed by sibling defs
    """Dump the current git repo's id, commit sha, and branch to
    <folder_path>/git_log.json for experiment reproducibility.

    The mangled original never bound `repo`, and dumped the folder path instead
    of the info dict.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):  # was mangled to `__UpperCAmelCase` with param `__magic_name__`
    """Handle single- and multi-GPU / multi-node setup.

    Mutates `params` in place, filling the distributed-training fields
    (`world_size`, `n_nodes`, `node_id`, ranks, `is_master`, ...). The mangled
    original's body referenced `params` while the parameter was named
    `__magic_name__`, and every `params.x = ...` assignment had been lost.
    """
    if params.n_gpu <= 0:
        # CPU-only run: fill in degenerate values and bail out early.
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        # Topology is communicated through torch.distributed launcher env vars.
        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID : %i" % params.node_id)
    logger.info(PREFIX + "Local rank : %i" % params.local_rank)
    logger.info(PREFIX + "World size : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):  # was mangled to `__UpperCAmelCase` with param `__magic_name__`
    """Seed numpy and torch (and all CUDA devices when GPUs are in use) for
    reproducibility. The mangled original's body used `args` while the
    parameter was named `__magic_name__`, raising NameError.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
# ---- stray table fragment removed (non-code artifact) ----
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Per-device batch-size ceilings used by the dataloader/accumulation logic
# below; the mangled original bound both to the same throwaway name while
# `MAX_GPU_BATCH_SIZE` is referenced later, raising NameError.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    """Build train/validation/test DataLoaders for one cross-validation fold.

    The mangled original declared five parameters all named `__magic_name__`
    (a SyntaxError) and never bound `tokenizer`/`datasets`/the dataloaders;
    the real names are restored (the function is called as
    `get_fold_dataloaders` below).

    Args:
        accelerator: `Accelerator` object.
        dataset: the full GLUE MRPC `DatasetDict`.
        train_idxs / valid_idxs: row indices for this fold's train/val split.
        batch_size: training batch size per device.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """Train BERT on GLUE MRPC with stratified k-fold cross validation and
    report the averaged test metrics across folds.

    The mangled original declared both parameters with the same name (a
    SyntaxError) and bound every local to a throwaway name while later lines
    used the real names; all locals are restored (called as
    `training_function(config, args)` in `main`).
    """
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()

    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    """Parse CLI arguments and launch cross-validated training.

    Restores the `parser`/`args`/`config` locals lost to mangling (the
    following lines referenced them by name) and the argparse `type=`/`default=`
    values that had been replaced by an undefined placeholder.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
# Script entry point: only run training when executed directly, not on import.
if __name__ == "__main__":
    main()
# ---- stray table fragment removed (non-code artifact) ----
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Holds YOLOS image-processor kwargs and computes the (height, width) an
    input is expected to be resized to.

    The mangled original was named `A_` (it is instantiated below as
    `YolosImageProcessingTester`), never set any attribute on `self`, and its
    methods were both renamed to `_A` while call sites use the real names —
    all restored here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default to the shortest/longest-edge resizing config used by YOLOS.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to construct a YolosImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should resize to.

        In batched mode the max over the per-image expectations is returned,
        mirroring the processor's pad-to-largest behavior.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            # Scale the shorter edge to `shortest_edge`, keep the aspect ratio.
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class A_ (ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for YolosImageProcessor: resizing/normalization/padding on PIL,
    numpy, and torch inputs, plus COCO detection/panoptic annotation encoding.

    The mangled original never set `self.image_processor_tester`, mangled the
    `image_processor_dict` property and every local, and renamed all test
    methods to `_A` so unittest could not discover them; names are restored.
    """

    # Mixin hook: the processor class under test (None when vision deps are absent).
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Initialize image_processings: one fully configured, one that only pads.
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
# ---- stray table fragment removed (non-code artifact) ----
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ (a_ ):
    """Processor wrapping a Chinese-CLIP image processor and a BERT tokenizer
    into a single object that handles text and/or image inputs.
    """

    # BUG FIX: the original assigned all three values to the same attribute
    # name (`a__`), so each assignment clobbered the previous one. Restored
    # to the ProcessorMixin attribute names.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Build the processor; accepts the deprecated `feature_extractor`
        keyword and maps it onto `image_processor`.

        Raises:
            ValueError: if either sub-processor is ultimately missing.
        """
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the tokenizer encoding (with `pixel_values` merged in when both
        are given), or a `BatchEncoding` of the image features alone.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            # BUG FIX: the original returned `dict(**kwargs)` here, silently
            # dropping the computed image features.
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Order-preserving union of tokenizer and image-processor input names."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 653 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> str:
"""simple docstring"""
if not isinstance(__magic_name__ ,__magic_name__ ):
raise ValueError("iterations must be defined as integers" )
if not isinstance(__magic_name__ ,__magic_name__ ) or not number >= 1:
raise ValueError(
"starting number must be\n and integer and be more than 0" )
if not iterations >= 1:
raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" )
snake_case_ : Dict = ""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__magic_name__ )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 653 | 1 |
'''simple docstring'''
def get_data(source_data: list) -> list[list[float]]:
    """Transpose rows of `source_data` into per-column lists of floats.

    Rows may be ragged; a new column list is created the first time an index
    is seen.

    BUG FIX: the original bound the parameter and locals to obfuscated names
    while the body read `source_data` / `data_lists` (NameError). Renamed to
    `get_data`, the name the rest of this module calls.
    """
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            # Grow the column list lazily on first sight of index i.
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists
def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize every column, inverted when its weight is 0.

    weight == 0: score = 1 - (x - min) / (max - min)  (smaller is better)
    weight == 1: score = (x - min) / (max - min)      (larger is better)
    A constant column (max == min) scores 1 (weight 0) or 0 (weight 1).

    Raises:
        ValueError: for any weight other than 0 or 1.

    BUG FIX: the original's parameters and locals were obfuscated away from
    the names the body read (`data_lists`, `score_lists`, ...), raising
    NameError. Renamed to `calculate_each_score`, the name this module calls.
    """
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists
def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column score lists element-wise into one final score per row.

    Assumes all inner lists have the same length as the first.

    BUG FIX: locals were obfuscated away from the names the body read
    (`final_scores`, `slist`), raising NameError. Renamed to
    `generate_final_scores`, the name this module calls.
    """
    final_scores: list[float] = [0 for _ in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores
def __UpperCAmelCase(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score every row of `source_data` by weighted min-max proximity and
    append the final score to each row in place.

    Args:
        source_data: rows of raw measurements (mutated: score appended).
        weights: one 0/1 weight per column (0 = lower is better).

    Returns:
        `source_data` with the computed score appended to every row.

    BUG FIX: the original's duplicate/obfuscated parameter names made the
    body's `source_data` unbound (NameError).
    """
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
| 653 |
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Default per-device batch sizes for this example script.
# NOTE(review): both constants were obfuscated to the same name
# (`__lowerCamelCase`), so the second assignment overwrites the first —
# presumably these were MAX_GPU_BATCH_SIZE and EVAL_BATCH_SIZE; confirm
# against the upstream accelerate example.
__lowerCamelCase : Tuple = 16
__lowerCamelCase : Optional[int] = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build GLUE/MRPC train and eval dataloaders tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` coordinating (possibly distributed) runs.
        batch_size: per-device batch size used for both dataloaders.

    Returns:
        (train_dataloader, eval_dataloader)

    BUG FIX: the original declared both parameters with the same name (a
    SyntaxError) and assigned every local to `snake_case_` while the body read
    the real names (`tokenizer`, `datasets`, ...). Renamed to
    `get_dataloaders`, the name `training_function` calls.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only: swap in mocked dataloaders so CI never downloads
# GLUE/BERT assets.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    # BUG FIX: the original assigned the mock to the throwaway name
    # `__lowerCamelCase`, leaving the real builder in use during tests.
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Fine-tune bert-base-cased on GLUE MRPC, automatically lowering the
    batch size on out-of-memory errors via `find_executable_batch_size`.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace providing `cpu` and `mixed_precision`.

    BUG FIX: the original declared duplicate parameter names (a SyntaxError)
    and bound every local to `snake_case_` while the body read the real names.
    Renamed to `training_function`, the name `main()` calls.
    """
    # For testing only: shorten the run when dataloaders are mocked.
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''', eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    """Parse CLI arguments and launch training.

    BUG FIX: locals were obfuscated to `snake_case_` while the body read
    `parser`/`args`/`config` (NameError). Renamed to `main`, the name the
    script's entry guard calls.
    """
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    # Default hyper-parameters mirroring the upstream accelerate example.
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    # Standard script entry point.
    main()
| 653 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
# PIL is an optional dependency; only import it when vision extras are installed.
if is_vision_available():
    import PIL
# Module-level logger (name obfuscated to `__lowerCamelCase`; presumably `logger`).
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Normalize `videos` into a batch: a list of videos, each a list of frames.

    Accepts a list of videos (returned unchanged), a single video (list of
    frames), or a single image.

    Raises:
        ValueError: if the input matches none of those shapes.

    BUG FIX: the original declared the parameter as `__magic_name__` while the
    body read `videos` (NameError), and was renamed to `make_batched`, the
    name `preprocess` calls.
    """
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class A_ (a_ ):
    """Image processor for video models: resizes, center-crops, rescales (with
    an optional offset into [-1, 1]) and normalizes every frame of each video.

    BUG FIX: the obfuscated original declared every method as `_A` (each
    definition clobbered the previous, so `self.resize` etc. did not exist)
    and every parameter as `lowerCAmelCase__` (duplicate parameter names are a
    SyntaxError). Names below are restored from what the method bodies read.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        """Store default preprocessing settings (ImageNet mean/std fallback)."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame to `size` (either a shortest-edge spec or exact h/w)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to exactly `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by `scale`; with `offset`, shift first so the
        result is centered (e.g. uint8 input -> roughly [-0.5, 0.5] * 2*scale)."""
        image = image.astype(np.float32)  # original had obfuscated `np.floataa`
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Channel-wise normalize a frame with the given mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transform pipeline to a single frame."""
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one or more videos and return a `BatchFeature` whose
        `pixel_values` holds the transformed frames; per-call arguments
        override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 653 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class A_ (a_ ):
    """Agent tool that zero-shot classifies English text against
    caller-supplied labels using an NLI model (facebook/bart-large-mnli).

    BUG FIX: the obfuscated original assigned every class attribute to `a__`
    and every method to `_A`, so attributes/methods clobbered each other, and
    `setup` never stored the entailment id it computed. Names restored to the
    PipelineTool conventions the bodies reference.
    """

    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        """Load model/tokenizer, then locate the 'entailment' label index in
        the model config.

        Raises:
            ValueError: if no label starting with 'entail' is found.
        """
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                # BUG FIX: the original dropped this assignment into a
                # throwaway local instead of self.entailment_id.
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        """Tokenize (text, 'This example is <label>') premise/hypothesis pairs
        for every candidate label, remembering the labels for decoding."""
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        """Return the label whose entailment logit (column 2) is highest."""
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 653 | 1 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__lowerCamelCase : int = '''Create a default config file for Accelerate with only a few flags set.'''
def write_basic_config(mixed_precision="no", save_location=default_json_config_file, use_xpu=False):
    """Write a minimal single-machine Accelerate cluster config.

    Args:
        mixed_precision: one of "no", "fp16", "bf16", "fp8".
        save_location: path of the JSON config file to create.
        use_xpu: prefer Intel XPUs when available.

    Returns:
        The written path, or False if a config already exists at the location.

    BUG FIX: locals were obfuscated to `snake_case_` while the body read
    `path`/`config` (NameError), and the per-device branches never filled the
    config dict. Renamed to `write_basic_config`, the name this module calls.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        # No accelerator detected: fall back to single-process CPU.
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def __UpperCAmelCase(parser, parents):
    """Register the `default` subcommand on `parser` and return the sub-parser.

    BUG FIX: the original dropped the sub-parser into a throwaway local while
    the body kept calling `parser.add_argument` on an unbound name, and lost
    the keyword values (`default_json_config_file`, the module help string,
    `SubcommandHelpFormatter`).
    """
    parser = parser.add_parser("default", parents=parents, help=__lowerCamelCase, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    # NOTE(review): at runtime `__UpperCAmelCase` resolves to the module's
    # *last* binding of that name — the config-writing command defined below.
    parser.set_defaults(func=__UpperCAmelCase)
    return parser
def __UpperCAmelCase(args):
    """Entry point for `accelerate config default`: write the basic config
    file and report where it was saved.

    BUG FIX: the original bound the result to a throwaway local while the
    body read `config_file` (NameError).
    """
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-module registry: maps submodule name -> public names it provides.
# BUG FIX: the original assigned every structure update (and the final
# _LazyModule) to the throwaway name `__lowerCamelCase`, so the
# `_import_structure` referenced at the bottom was never defined and the lazy
# module was never installed into sys.modules.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # Install the lazy module so submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class A_ (unittest.TestCase ):
    """Builds a tiny RobertaConfig plus random inputs for the Flax Roberta tests.

    NOTE(review): obfuscation collapsed every parameter name to
    ``lowerCAmelCase__`` (a duplicate-parameter SyntaxError as written) and the
    attribute assignments reference the intended original names (``parent``,
    ``batch_size``, ...) — restore the real signature from the upstream test file.
    """
    def __init__( self :Union[str, Any] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str]=13 , lowerCAmelCase__ :int=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :Tuple=99 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Optional[Any]=5 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Tuple=37 , lowerCAmelCase__ :int="gelu" , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=512 , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :str=0.0_2 , lowerCAmelCase__ :Tuple=4 , ) -> Optional[int]:
        """Store the (tiny) model/test hyper-parameters on the tester instance."""
        snake_case_ : Optional[int] = parent
        snake_case_ : Tuple = batch_size
        snake_case_ : List[str] = seq_length
        snake_case_ : int = is_training
        snake_case_ : Tuple = use_attention_mask
        snake_case_ : List[str] = use_token_type_ids
        snake_case_ : List[Any] = use_labels
        snake_case_ : Union[str, Any] = vocab_size
        snake_case_ : Any = hidden_size
        snake_case_ : Optional[int] = num_hidden_layers
        snake_case_ : Any = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : List[Any] = hidden_act
        snake_case_ : str = hidden_dropout_prob
        snake_case_ : Any = attention_probs_dropout_prob
        snake_case_ : int = max_position_embeddings
        snake_case_ : Optional[int] = type_vocab_size
        snake_case_ : Dict = type_sequence_label_size
        snake_case_ : Tuple = initializer_range
        snake_case_ : List[str] = num_choices
    def _A ( self :Optional[int] ) -> str:
        """Create (config, input_ids, token_type_ids, attention_mask) with random contents."""
        snake_case_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : List[str] = None
        if self.use_attention_mask:
            snake_case_ : str = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : Union[str, Any] = None
        if self.use_token_type_ids:
            snake_case_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ : int = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def _A ( self :Union[str, Any] ) -> Dict:
        """Return (config, inputs_dict) in the shape the common Flax test mixin expects."""
        snake_case_ : Optional[int] = self.prepare_config_and_inputs()
        snake_case_, snake_case_, snake_case_, snake_case_ : List[Any] = config_and_inputs
        snake_case_ : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        """Return config and inputs for exercising Roberta as a decoder (adds encoder states/mask)."""
        snake_case_ : Optional[int] = self.prepare_config_and_inputs()
        snake_case_, snake_case_, snake_case_, snake_case_ : List[Any] = config_and_inputs
        snake_case_ : Optional[Any] = True
        snake_case_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class A_ (a_ , unittest.TestCase ):
    """Runs the common Flax model-test mixin over every Flax Roberta head.

    NOTE(review): obfuscation renamed all class attributes to ``a__`` (only the
    last assignment survives) and ``setUp`` references ``FlaxRobertaModelTester``,
    a name not defined in this file (the helper class above was renamed to
    ``A_``) — confirm both against the upstream test module.
    """
    a__ = True
    a__ = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def _A ( self :int ) -> int:
        """Instantiate the shared model tester before each test."""
        snake_case_ : Optional[Any] = FlaxRobertaModelTester(self )
    @slow
    def _A ( self :Optional[Any] ) -> Union[str, Any]:
        """Smoke-test: load pretrained ``roberta-base`` weights into each Flax head and run a 1x1 input."""
        for model_class_name in self.all_model_classes:
            snake_case_ : Optional[int] = model_class_name.from_pretrained("roberta-base" , from_pt=lowerCAmelCase__ )
            snake_case_ : Tuple = model(np.ones((1, 1) ) )
            self.assertIsNotNone(lowerCAmelCase__ )
| 653 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Builds a tiny LayoutLMv3 configuration plus random text/layout/image inputs.

    NOTE(review): obfuscation collapsed every parameter name to
    ``lowerCAmelCase__`` (duplicate parameters are a SyntaxError as written);
    the attribute assignments below reveal the intended original names
    (``parent``, ``batch_size``, ``num_channels``, ...) — restore from upstream.
    """
    def __init__( self :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :List[Any]=3 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Any=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :List[str]=99 , lowerCAmelCase__ :Union[str, Any]=36 , lowerCAmelCase__ :Dict=3 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Optional[int]=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :Dict=0.1 , lowerCAmelCase__ :Optional[int]=512 , lowerCAmelCase__ :Union[str, Any]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Any=0.0_2 , lowerCAmelCase__ :Dict=6 , lowerCAmelCase__ :Optional[int]=6 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :int=None , lowerCAmelCase__ :Any=1_000 , ) -> Any:
        """Store the (tiny) model/test hyper-parameters and derive sequence lengths."""
        snake_case_ : Optional[int] = parent
        snake_case_ : Union[str, Any] = batch_size
        snake_case_ : Optional[int] = num_channels
        snake_case_ : List[Any] = image_size
        snake_case_ : Optional[int] = patch_size
        snake_case_ : Union[str, Any] = text_seq_length
        snake_case_ : Dict = is_training
        snake_case_ : Optional[Any] = use_input_mask
        snake_case_ : Union[str, Any] = use_token_type_ids
        snake_case_ : Dict = use_labels
        snake_case_ : List[str] = vocab_size
        snake_case_ : Optional[Any] = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : int = num_attention_heads
        snake_case_ : List[str] = intermediate_size
        snake_case_ : str = hidden_act
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : Optional[int] = attention_probs_dropout_prob
        snake_case_ : Union[str, Any] = max_position_embeddings
        snake_case_ : List[Any] = type_vocab_size
        snake_case_ : Union[str, Any] = type_sequence_label_size
        snake_case_ : List[Any] = initializer_range
        snake_case_ : Union[str, Any] = coordinate_size
        snake_case_ : int = shape_size
        snake_case_ : Tuple = num_labels
        snake_case_ : List[Any] = num_choices
        snake_case_ : List[str] = scope
        snake_case_ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        snake_case_ : str = text_seq_length
        snake_case_ : Optional[int] = (image_size // patch_size) ** 2 + 1
        snake_case_ : str = self.text_seq_length + self.image_seq_length
    def _A ( self :Union[str, Any] ) -> Tuple:
        """Build random input ids, legal bounding boxes, pixel values, masks and labels."""
        snake_case_ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        snake_case_ : str = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        # (each box must satisfy x0 <= x1 and y0 <= y1; swap coordinates when violated)
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    snake_case_ : Optional[Any] = bbox[i, j, 3]
                    snake_case_ : Any = bbox[i, j, 1]
                    snake_case_ : Tuple = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    snake_case_ : str = bbox[i, j, 2]
                    snake_case_ : Dict = bbox[i, j, 0]
                    snake_case_ : Union[str, Any] = t
        snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case_ : Dict = None
        if self.use_input_mask:
            snake_case_ : str = random_attention_mask([self.batch_size, self.text_seq_length] )
        snake_case_ : Any = None
        if self.use_token_type_ids:
            snake_case_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        snake_case_ : Union[str, Any] = None
        snake_case_ : str = None
        if self.use_labels:
            snake_case_ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        snake_case_ : str = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def _A ( self :Dict , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[str] ) -> Optional[Any]:
        """Check the base model's output shapes for text+image, text-only and image-only inputs."""
        snake_case_ : Tuple = LayoutLMvaModel(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        # text + image
        snake_case_ : Tuple = model(lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
        snake_case_ : Optional[int] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        snake_case_ : Optional[int] = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ )
        snake_case_ : int = model(lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        snake_case_ : List[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        snake_case_ : Union[str, Any] = model(pixel_values=lowerCAmelCase__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def _A ( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> List[Any]:
        """Check the sequence-classification head produces (batch_size, num_labels) logits."""
        snake_case_ : str = self.num_labels
        snake_case_ : List[Any] = LayoutLMvaForSequenceClassification(lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : Optional[int] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _A ( self :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] ) -> str:
        """Check the token-classification head produces per-text-token logits."""
        snake_case_ : Optional[int] = self.num_labels
        snake_case_ : str = LayoutLMvaForTokenClassification(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : List[Any] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , labels=lowerCAmelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def _A ( self :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :str , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> Tuple:
        """Check the QA head produces start/end logits over the full (text+image) sequence."""
        snake_case_ : List[str] = LayoutLMvaForQuestionAnswering(config=lowerCAmelCase__ )
        model.to(lowerCAmelCase__ )
        model.eval()
        snake_case_ : List[Any] = model(
            lowerCAmelCase__ , bbox=lowerCAmelCase__ , pixel_values=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , token_type_ids=lowerCAmelCase__ , start_positions=lowerCAmelCase__ , end_positions=lowerCAmelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _A ( self :int ) -> Union[str, Any]:
        """Repack prepare_config_and_inputs() into the (config, inputs_dict) pair used by the mixin."""
        snake_case_ : Dict = self.prepare_config_and_inputs()
        (
            (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ),
        ) : Optional[Any] = config_and_inputs
        snake_case_ : Tuple = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class A_ (a_ , a_ , unittest.TestCase ):
    """Runs the common PyTorch model-test mixin over the LayoutLMv3 heads.

    NOTE(review): obfuscation renamed all class attributes to ``a__`` and the
    ``get_values(...)`` arguments inside ``_prepare_for_class`` to
    ``lowerCAmelCase__`` (upstream these are the MODEL_FOR_*_MAPPING constants)
    — restore from the upstream test module before relying on label handling.
    """
    a__ = False
    a__ = False
    a__ = False
    a__ = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> List[str]:
        """Pipeline-test hook: always skip (LayoutLMv3 pipelines need special inputs)."""
        return True
    def _A ( self :List[Any] ) -> str:
        """Create the shared model tester and a ConfigTester for LayoutLMvaConfig."""
        snake_case_ : Tuple = LayoutLMvaModelTester(self )
        snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
    def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any]=False ) -> Any:
        """Expand inputs for multiple-choice models and synthesize labels per head type."""
        snake_case_ : List[str] = copy.deepcopy(lowerCAmelCase__ )
        if model_class in get_values(lowerCAmelCase__ ):
            snake_case_ : Optional[Any] = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(lowerCAmelCase__ , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(lowerCAmelCase__ ):
                snake_case_ : Union[str, Any] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in get_values(lowerCAmelCase__ ):
                snake_case_ : List[Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
                snake_case_ : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in [
                *get_values(lowerCAmelCase__ ),
            ]:
                snake_case_ : Union[str, Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__ )
            elif model_class in [
                *get_values(lowerCAmelCase__ ),
            ]:
                snake_case_ : List[str] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=lowerCAmelCase__ , )
        return inputs_dict
    def _A ( self :Any ) -> Any:
        """Run the generic configuration round-trip tests."""
        self.config_tester.run_common_tests()
    def _A ( self :int ) -> int:
        """Test the base model's forward pass and output shapes."""
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )
    def _A ( self :Any ) -> Dict:
        """Test the base model under every position-embedding variant."""
        snake_case_ : Dict = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            snake_case_ : int = type
            self.model_tester.create_and_check_model(*lowerCAmelCase__ )
    def _A ( self :int ) -> str:
        """Test the sequence-classification head."""
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )
    def _A ( self :List[Any] ) -> Optional[Any]:
        """Test the token-classification head."""
        snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )
    def _A ( self :int ) -> Union[str, Any]:
        """Test the question-answering head."""
        snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )
    @slow
    def _A ( self :Tuple ) -> List[Any]:
        """Smoke-test loading the first published LayoutLMv3 checkpoint."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            snake_case_ : str = LayoutLMvaModel.from_pretrained(lowerCAmelCase__ )
            self.assertIsNotNone(lowerCAmelCase__ )
def __UpperCAmelCase ( )-> List[str]:
    """Load and return the COCO cats fixture image used by the slow integration test."""
    # Fixed: the original bound the opened image to a local variable but then
    # returned the undefined name `image`, raising NameError on every call.
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
class A_ (unittest.TestCase ):
    """Slow integration test: run microsoft/layoutlmv3-base and verify the output logits."""
    @cached_property
    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        """Return an image processor with OCR disabled (words/boxes supplied manually), or None without vision deps."""
        return LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase__ ) if is_vision_available() else None
    @slow
    def _A ( self :Union[str, Any] ) -> Union[str, Any]:
        """Forward the fixture image with hand-crafted token ids/boxes and compare against reference logits."""
        snake_case_ : Optional[int] = LayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base" ).to(lowerCAmelCase__ )
        snake_case_ : Optional[Any] = self.default_image_processor
        snake_case_ : Optional[int] = prepare_img()
        snake_case_ : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors="pt" ).pixel_values.to(lowerCAmelCase__ )
        snake_case_ : List[str] = torch.tensor([[1, 2]] )
        snake_case_ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        snake_case_ : Any = model(
            input_ids=input_ids.to(lowerCAmelCase__ ) , bbox=bbox.to(lowerCAmelCase__ ) , pixel_values=pixel_values.to(lowerCAmelCase__ ) , )
        # verify the logits
        # 199 = 2 text tokens + 196 patches + 1 CLS token for a 224x224 input
        snake_case_ : Optional[Any] = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape , lowerCAmelCase__ )
        snake_case_ : str = torch.tensor(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(lowerCAmelCase__ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
| 653 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class A_ (a_ ):
"""simple docstring"""
def __init__( self :Optional[int] , *lowerCAmelCase__ :List[str] , **lowerCAmelCase__ :str ) -> None:
'''simple docstring'''
warnings.warn(
"The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PoolFormerImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 653 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def __UpperCAmelCase ( __magic_name__ )-> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def __UpperCAmelCase ( )-> List[str]:
    """Check `parallel_backend` registers the Spark backend and rejects unknown backend names.

    NOTE(review): ``__magic_name__`` is not defined in this scope — obfuscation
    collapsed the original names (the expected exception type, the add-one
    function and the input list) into one identifier, so this test raises
    NameError as written; restore the original arguments from upstream.
    """
    with parallel_backend("spark" ):
        assert ParallelBackendConfig.backend_name == "spark"
    snake_case_ : str = [1, 2, 3]
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=2 )
    with pytest.raises(__magic_name__ ):
        with parallel_backend("unsupported backend" ):
            map_nested(__magic_name__ ,__magic_name__ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("num_proc" ,[2, -1] )
def __UpperCAmelCase ( __magic_name__ )-> List[Any]:
    """Check `map_nested` applies add-one over lists/dicts/nested dicts under the Spark backend.

    NOTE(review): obfuscation collapsed all local names — the inputs and the
    expected outputs are each assigned to ``snake_case_`` and the asserts
    reference the undefined ``expected_map_nested_sa``, so this test raises
    NameError as written; restore the distinct names from upstream.
    """
    snake_case_ : Optional[Any] = [1, 2]
    snake_case_ : Union[str, Any] = {"a": 1, "b": 2}
    snake_case_ : str = {"a": [1, 2], "b": [3, 4]}
    snake_case_ : List[str] = {"a": {"1": 1}, "b": 2}
    snake_case_ : Optional[int] = {"a": 1, "b": 2, "c": 3, "d": 4}
    snake_case_ : Tuple = [2, 3]
    snake_case_ : str = {"a": 2, "b": 3}
    snake_case_ : Dict = {"a": [2, 3], "b": [4, 5]}
    snake_case_ : List[Any] = {"a": {"1": 2}, "b": 3}
    snake_case_ : str = {"a": 2, "b": 3, "c": 4, "d": 5}
    with parallel_backend("spark" ):
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
        assert map_nested(__magic_name__ ,__magic_name__ ,num_proc=__magic_name__ ) == expected_map_nested_sa
| 653 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ (a_ ):
    """Processor wrapping a Pix2Struct image processor and a T5 tokenizer.

    For non-VQA checkpoints, text and images are processed independently; for
    VQA checkpoints the text is rendered onto the image as a header, so the
    tokenizer output is only attached as decoder inputs.
    """
    a__ = ['''image_processor''', '''tokenizer''']
    a__ = '''Pix2StructImageProcessor'''
    a__ = ('''T5Tokenizer''', '''T5TokenizerFast''')
    def __init__( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Dict ) -> List[Any]:
        """Store both sub-processors; token-type ids are never returned by this processor."""
        snake_case_ : Union[str, Any] = False
        super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
    def __call__( self :str , lowerCAmelCase__ :Optional[Any]=None , lowerCAmelCase__ :Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Union[bool, str, PaddingStrategy] = False , lowerCAmelCase__ :Union[bool, str, TruncationStrategy] = None , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[int] = 2_048 , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :Optional[int] = None , lowerCAmelCase__ :Optional[bool] = None , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :Optional[Union[str, TensorType]] = None , **lowerCAmelCase__ :Optional[int] , ) -> BatchEncoding:
        """Encode images and/or text into a single BatchEncoding.

        Raises ValueError when neither images nor text are given. Text-only
        input (non-VQA) returns the tokenizer output directly; otherwise the
        tokenizer's input_ids/attention_mask are renamed to decoder_* and
        merged into the image-processor output.
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None and not self.image_processor.is_vqa:
            snake_case_ : Tuple = self.tokenizer
            snake_case_ : Any = self.tokenizer(
                text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
            return text_encoding
        if not self.image_processor.is_vqa:
            # add pixel_values
            snake_case_ : Optional[int] = self.image_processor(
                lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , **lowerCAmelCase__ )
        else:
            # add pixel_values and bbox
            # (VQA mode renders the text as a header onto the image itself)
            snake_case_ : Dict = self.image_processor(
                lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , max_patches=lowerCAmelCase__ , header_text=lowerCAmelCase__ , **lowerCAmelCase__ )
        if text is not None and not self.image_processor.is_vqa:
            snake_case_ : Union[str, Any] = self.tokenizer(
                text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
            # rename the tokenizer outputs to decoder_* before merging
            if "attention_mask" in text_encoding:
                snake_case_ : List[str] = text_encoding.pop("attention_mask" )
            if "input_ids" in text_encoding:
                snake_case_ : Optional[int] = text_encoding.pop("input_ids" )
        else:
            snake_case_ : List[Any] = None
        if text_encoding is not None:
            encoding_image_processor.update(lowerCAmelCase__ )
        return encoding_image_processor
    def _A ( self :Optional[Any] , *lowerCAmelCase__ :Dict , **lowerCAmelCase__ :List[Any] ) -> Tuple:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
    def _A ( self :Union[str, Any] , *lowerCAmelCase__ :Any , **lowerCAmelCase__ :List[Any] ) -> Union[str, Any]:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
    @property
    def _A ( self :int ) -> Tuple:
        """Union of tokenizer and image-processor input names, deduplicated in order."""
        snake_case_ : Optional[int] = self.tokenizer.model_input_names
        snake_case_ : List[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 653 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
# TODO Update this
__lowerCamelCase : int = {
'''facebook/esm-1b''': '''https://huggingface.co/facebook/esm-1b/resolve/main/config.json''',
# See all ESM models at https://huggingface.co/models?filter=esm
}
class A_ (a_ ):
    """Configuration for ESM models, optionally carrying an ESMFold folding-head config.

    NOTE(review): obfuscation collapsed all constructor parameter names to
    ``lowerCAmelCase__`` (a SyntaxError as written); the attribute assignments
    show the intended names (``vocab_size``, ``hidden_size``, ...) — restore
    the real signature from the upstream `configuration_esm.py`.
    """
    a__ = '''esm'''
    def __init__( self :Dict , lowerCAmelCase__ :List[Any]=None , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :str=None , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Dict=12 , lowerCAmelCase__ :Union[str, Any]=3_072 , lowerCAmelCase__ :int=0.1 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :List[Any]=1_026 , lowerCAmelCase__ :int=0.0_2 , lowerCAmelCase__ :Optional[int]=1E-1_2 , lowerCAmelCase__ :List[str]="absolute" , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :List[str]=False , lowerCAmelCase__ :List[Any]=False , lowerCAmelCase__ :Dict=None , lowerCAmelCase__ :str=None , **lowerCAmelCase__ :Union[str, Any] , ) -> Any:
        """Store transformer hyper-parameters; for folding models, normalize `esmfold_config`/`vocab_list`."""
        super().__init__(pad_token_id=lowerCAmelCase__ , mask_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
        snake_case_ : str = vocab_size
        snake_case_ : str = hidden_size
        snake_case_ : List[str] = num_hidden_layers
        snake_case_ : List[str] = num_attention_heads
        snake_case_ : Any = intermediate_size
        snake_case_ : Optional[Any] = hidden_dropout_prob
        snake_case_ : Tuple = attention_probs_dropout_prob
        snake_case_ : List[Any] = max_position_embeddings
        snake_case_ : str = initializer_range
        snake_case_ : List[Any] = layer_norm_eps
        snake_case_ : str = position_embedding_type
        snake_case_ : Optional[int] = use_cache
        snake_case_ : str = emb_layer_norm_before
        snake_case_ : List[Any] = token_dropout
        snake_case_ : str = is_folding_model
        if is_folding_model:
            # Folding models always carry an EsmFoldConfig; accept a dict or an
            # instance, and fall back to defaults with a log message.
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values." )
                snake_case_ : Optional[Any] = EsmFoldConfig()
            elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
                snake_case_ : Union[str, Any] = EsmFoldConfig(**lowerCAmelCase__ )
            snake_case_ : Optional[Any] = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!" )
                snake_case_ : List[str] = get_default_vocab_list()
            else:
                snake_case_ : List[str] = vocab_list
        else:
            snake_case_ : List[Any] = None
            snake_case_ : int = None
        if self.esmfold_config is not None and getattr(self.esmfold_config , "use_esm_attn_map" , lowerCAmelCase__ ):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!" )
    def _A ( self :Optional[int] ) -> List[Any]:
        """Serialize to a dict, expanding the nested EsmFoldConfig when present."""
        snake_case_ : Any = super().to_dict()
        if isinstance(self.esmfold_config , lowerCAmelCase__ ):
            snake_case_ : Optional[int] = self.esmfold_config.to_dict()
        return output
@dataclass
class A_ :
    """Configuration for the ESMFold head attached to an ESM language model.

    NOTE(review): obfuscation renamed every dataclass field to ``a__``, so only
    the final assignment survives as a class attribute; restore the original
    field names (``esm_type``, ``fp16_esm``, ..., ``trunk``) from upstream.
    """
    a__ = None
    a__ = True
    a__ = False
    a__ = False
    a__ = False
    a__ = 0
    a__ = True
    a__ = False
    a__ = 128
    a__ = None
    def _A ( self :Dict ) -> int:
        """Post-init normalization: coerce a dict/None `trunk` into a TrunkConfig instance."""
        if self.trunk is None:
            snake_case_ : Dict = TrunkConfig()
        elif isinstance(self.trunk , lowerCAmelCase__ ):
            snake_case_ : int = TrunkConfig(**self.trunk )
    def _A ( self :Optional[int] ) -> Union[str, Any]:
        """Serialize to a dict, expanding the nested TrunkConfig."""
        snake_case_ : Tuple = asdict(self )
        snake_case_ : Optional[int] = self.trunk.to_dict()
        return output
@dataclass
class A_ :
    """Configuration for the ESMFold trunk (folding blocks before the structure module).

    NOTE(review): obfuscation renamed every dataclass field to ``a__``; restore
    the original field names (``num_blocks``, ``sequence_state_dim``, ...)
    from upstream.
    """
    a__ = 48
    a__ = 1024
    a__ = 128
    a__ = 32
    a__ = 32
    a__ = 32
    a__ = 0
    a__ = 0
    a__ = False
    a__ = 4
    a__ = 128
    a__ = None
    def _A ( self :List[Any] ) -> Union[str, Any]:
        """Post-init validation: normalize the structure module config and check dimension consistency."""
        if self.structure_module is None:
            snake_case_ : Optional[int] = StructureModuleConfig()
        elif isinstance(self.structure_module , lowerCAmelCase__ ):
            snake_case_ : List[str] = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(F'''`max_recycles` should be positive, got {self.max_recycles}.''' )
        # NOTE(review): `X % X` is always 0, so the next two checks can never
        # fire — presumably they were meant to test divisibility by
        # `sequence_head_width` / `pairwise_head_width`; confirm upstream.
        if self.sequence_state_dim % self.sequence_state_dim != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got"
                F''' {self.sequence_state_dim} and {self.sequence_state_dim}.''' )
        if self.pairwise_state_dim % self.pairwise_state_dim != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got"
                F''' {self.pairwise_state_dim} and {self.pairwise_state_dim}.''' )
        snake_case_ : Dict = self.sequence_state_dim // self.sequence_head_width
        snake_case_ : Optional[int] = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"
                F''' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.''' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"
                F''' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.''' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F'''`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.''' )
        if self.dropout >= 0.4:
            raise ValueError(F'''`dropout` should not be greater than 0.4, got {self.dropout}.''' )
    def _A ( self :Tuple ) -> List[str]:
        """Serialize to a dict, expanding the nested StructureModuleConfig."""
        snake_case_ : int = asdict(self )
        snake_case_ : Dict = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """Hyper-parameters of the structure module (IPA head sizes, resnet widths
    and numerical guards).

    Restored from a mangled dump: every default was assigned to the same
    ``a__`` class attribute, which left the dataclass without fields, and the
    class name was obfuscated even though other code in this module constructs
    ``StructureModuleConfig(...)``.
    """

    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self) -> dict:
        """Serialize all fields to a plain dict."""
        return asdict(self)
def __UpperCAmelCase() -> tuple:
    """Return the default ESM-style token vocabulary.

    Layout: 4 leading special tokens, the 25 single-character residue tokens,
    the gap symbols "." and "-", then "<null_1>" and "<mask>" (33 entries).

    The original annotated the return type as ``int`` even though a tuple of
    strings is returned; the annotation is corrected here. The returned tuple
    is byte-identical to the original's.
    """
    prefix_tokens = ("<cls>", "<pad>", "<eos>", "<unk>")
    residue_tokens = tuple("LAGVSERTIDPKQNFYMHWCXBUZO")
    suffix_tokens = (".", "-", "<null_1>", "<mask>")
    return prefix_tokens + residue_tokens + suffix_tokens
| 653 | 1 |
'''simple docstring'''
# Re-export the VQ-Diffusion pipeline only when both backends it needs
# (transformers and torch) are installed; otherwise this package exposes nothing.
from ...utils import is_torch_available, is_transformers_available


if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 653 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule -> public symbols. `_LazyModule` consumes this map so heavy
# backends (torch / TF) are only imported when a symbol is first accessed.
# BUGFIX: the dump rebound one obfuscated name for the dict AND each backend
# list (each assignment clobbered the previous one), and the final line both
# referenced an undefined `_import_structure` and bound the lazy module to a
# throwaway variable instead of installing it in `sys.modules`.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )
else:
    import sys

    # Replace this module object so attribute access triggers lazy imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 653 | 1 |
'''simple docstring'''
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig


# BUGFIX: the dump assigned the logger and the archive map to the SAME
# obfuscated name, so the logger was clobbered even though the config class
# below calls `logger.info(...)`.
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Intel/dpt-large": "https://huggingface.co/Intel/dpt-large/resolve/main/config.json",
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class A_(a_):
    """Configuration for DPT (Dense Prediction Transformer) models.

    Restored from a mangled dump: the constructor declared every argument with
    the same name (a SyntaxError), both `isinstance` branches compared a value
    to itself, `model_type` was lost (it is read in `to_dict` via
    `self.__class__.model_type`), and `to_dict` returned an undefined variable.
    Mutable list defaults are replaced by None sentinels with identical
    effective values.
    """

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=None,  # default: [2, 5, 8, 11]
        readout_type="project",
        reassemble_factors=None,  # default: [4, 2, 1, 0.5]
        neck_hidden_sizes=None,  # default: [96, 192, 384, 768]
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=None,  # default: [1, 1024, 24, 24]
        neck_ignore_stages=None,  # default: [0, 1]
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Materialize the documented defaults (avoids shared mutable defaults).
        backbone_out_indices = [2, 5, 8, 11] if backbone_out_indices is None else backbone_out_indices
        reassemble_factors = [4, 2, 1, 0.5] if reassemble_factors is None else reassemble_factors
        neck_hidden_sizes = [96, 192, 384, 768] if neck_hidden_sizes is None else neck_hidden_sizes
        backbone_featmap_shape = [1, 1_024, 24, 24] if backbone_featmap_shape is None else backbone_featmap_shape
        neck_ignore_stages = [0, 1] if neck_ignore_stages is None else neck_ignore_stages

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone.")
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info("Initializing the config with a `BiT` backbone.")
                self.backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                self.backbone_config = backbone_config
            else:
                raise ValueError(
                    f"backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}."
                )
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode.")
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices

        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']")
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 653 |
'''simple docstring'''
import inspect
import tempfile
import unittest

from huggingface_hub import hf_hub_download

from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


# Absolute tolerance for comparing model outputs against recorded slices.
# NOTE(review): the obfuscated name suggests this constant was originally
# called `TOLERANCE`; nothing in the visible chunk reads it under the current
# name — confirm against the canonical file before relying on it.
__lowerCamelCase : Optional[int] = 1E-4

if is_torch_available():
    # torch and the Autoformer classes are optional; the test suite below
    # guards on `is_torch_available()` before touching them.
    import torch

    from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
    from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """Builds a tiny `AutoformerConfig` plus random input batches for the tests.

    Restored from a mangled dump: the constructor declared every argument with
    the same name (a SyntaxError) and all five methods shared the name `_A`,
    so only the last survived — even though the test suite calls them by their
    real names (`prepare_config_and_inputs`, `get_config`, ...), and the suite
    instantiates this class as `AutoformerModelTester`.
    """

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=None,  # default: [1, 2, 3, 4, 5] (None sentinel avoids a shared mutable default)
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = [1, 2, 3, 4, 5] if lags_sequence is None else lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        # Derived lengths consumed by the common model tests.
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        """Return a small AutoformerConfig mirroring the tester's fields."""
        return AutoformerConfig(
            d_model=self.d_model,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            prediction_length=self.prediction_length,
            context_length=self.context_length,
            label_length=self.label_length,
            lags_sequence=self.lags_sequence,
            num_time_features=self.num_time_features,
            num_static_categorical_features=1,
            cardinality=[self.cardinality],
            embedding_dimension=[self.embedding_dimension],
            moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        """Build a random but shape-consistent batch for `config`."""
        # The model needs `max(lags)` extra past steps to assemble lag features.
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            "past_values": past_values,
            "static_categorical_features": static_categorical_features,
            "past_time_features": past_time_features,
            "past_observed_mask": past_observed_mask,
            "future_time_features": future_time_features,
            "future_values": future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        """Return (config, inputs_dict) for one test run."""
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        """Alias used by the shared ModelTesterMixin machinery."""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Round-trip encoder and decoder through save/load and check that the
        standalone halves reproduce the full model's hidden states."""
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]),
            dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]],
            device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ),
            dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init,
            inputs_embeds=dec_input,
            encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class A_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model test suite for Autoformer.

    Restored from a mangled dump: the class listed the same (undefined) base
    twice — which raises "duplicate base class" — every class flag was named
    `a__`, and all tests were named `_A`, so unittest would have collected
    nothing. Names follow the mixins' contract (`all_model_classes`, `test_*`).
    """

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    # Features Autoformer does not support in the shared tests.
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        """Saving then loading must report no missing weights."""
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason="Model has no tokens embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, "forward"))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "past_values",
                "past_time_features",
                "past_observed_mask",
                "static_categorical_features",
                "static_real_features",
                "future_values",
                "future_time_features",
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append("future_observed_mask")

            expected_arg_names.extend(
                [
                    "decoder_attention_mask",
                    "head_mask",
                    "decoder_head_mask",
                    "cross_attn_head_mask",
                    "encoder_outputs",
                    "past_key_values",
                    "output_hidden_states",
                    "output_attentions",
                    "use_cache",
                    "return_dict",
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        """Check shapes/counts of encoder, decoder and cross attentions."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, "seq_length", None)
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len)
        d_model = getattr(self.model_tester, "d_model", None)
        num_attention_heads = getattr(self.model_tester, "num_attention_heads", None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a serialized Autoformer test batch from the Hub and load it.

    Restored name: the integration tests in this file call ``prepare_batch``.

    Args:
        filename: name of the batch file inside the
            ``hf-internal-testing/tourism-monthly-batch`` dataset repo.

    Returns:
        The deserialized batch (a dict of tensors) mapped onto ``torch_device``.
    """
    file = hf_hub_download(repo_id="hf-internal-testing/tourism-monthly-batch", filename=filename, repo_type="dataset")
    # BUGFIX: ``map_location`` previously received the *file path* instead of a
    # device; tensors must be mapped onto the active test device. (The original
    # ``-> int`` return annotation was also wrong and has been dropped.)
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class A_(unittest.TestCase):
    """Slow integration tests against the pretrained tourism-monthly checkpoint.

    Restored from a mangled dump: all three tests were named `_A` (so only the
    last survived and unittest would not collect it) and locals were mangled.
    The comparison tolerance is inlined as 1e-4 (the module-level constant's
    name was destroyed by the obfuscation).
    """

    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
                future_values=batch["future_values"],
                future_time_features=batch["future_time_features"],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            output = model(
                past_values=batch["past_values"],
                past_time_features=batch["past_time_features"],
                past_observed_mask=batch["past_observed_mask"],
                static_categorical_features=batch["static_categorical_features"],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained("huggingface/autoformer-tourism-monthly").to(torch_device)
        batch = prepare_batch("val-batch.pt")
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch["static_categorical_features"],
                past_time_features=batch["past_time_features"],
                past_values=batch["past_values"],
                future_time_features=batch["future_time_features"],
                past_observed_mask=batch["past_observed_mask"],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 653 | 1 |
'''simple docstring'''
# Shared abstract bases for the datasets I/O readers (json/csv/parquet/...).
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class A_(ABC):
    """Abstract base for builders that read a dataset from files.

    Restored from a mangled dump: every ``__init__`` parameter was declared
    with the same name (a SyntaxError) and the attributes were assigned to a
    throwaway local; the parameter names are recovered from the right-hand
    sides of the original assignments. The base is ``ABC`` (the dump's base
    name was undefined, and the class uses ``@abstractmethod``).
    """

    def __init__(
        self,
        path_or_paths: "Optional[NestedDataStructureLike[PathLike]]" = None,
        split: "Optional[NamedSplit]" = None,
        features: "Optional[Features]" = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        # Default to the "train" split unless a split was given explicitly or
        # the caller passed a {split_name: path} mapping.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def _A(self) -> "Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]":
        """Materialize and return the dataset."""
        pass
class A_(ABC):
    """Abstract base for builders that read a dataset from an in-memory object.

    Restored from a mangled dump: every ``__init__`` parameter was declared
    with the same name (a SyntaxError) and the attributes were assigned to a
    throwaway local; parameter names are recovered from the right-hand sides
    of the original assignments. The base is ``ABC`` (the dump's base name was
    undefined, and the class uses ``@abstractmethod``).
    """

    def __init__(
        self,
        features: "Optional[Features]" = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def _A(self) -> "Union[Dataset, IterableDataset]":
        """Materialize and return the dataset."""
        pass
| 653 |
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ (a_ , unittest.TestCase ):
"""simple docstring"""
a__ = RobertaTokenizer
a__ = RobertaTokenizerFast
a__ = True
a__ = {'''cls_token''': '''<s>'''}
def _A ( self :Optional[int] ) -> List[Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case_ : Tuple = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
snake_case_ : List[Any] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case_ : int = {"unk_token": "<unk>"}
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCAmelCase__ ) )
def _A ( self :Optional[Any] , **lowerCAmelCase__ :str ) -> str:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Any , **lowerCAmelCase__ :Tuple ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _A ( self :Optional[int] , lowerCAmelCase__ :str ) -> Optional[int]:
'''simple docstring'''
snake_case_ : int = "lower newer"
snake_case_ : Tuple = "lower newer"
return input_text, output_text
def _A ( self :Tuple ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : Dict = "lower newer"
snake_case_ : int = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
snake_case_ : str = tokenizer.tokenize(lowerCAmelCase__ ) # , add_prefix_space=True)
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokens + [tokenizer.unk_token]
snake_case_ : Optional[int] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
    def _A(self) -> str:
        """Spot-check byte-level BPE ids against the pretrained vocab."""
        # NOTE(review): locals are mangled — the tokenizer lands in
        # ``snake_case_`` while the calls below read ``tokenizer``, and the
        # boolean behind ``lowerCAmelCase__`` was lost. Restore before running.
        snake_case_ : List[str] = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=lowerCAmelCase__), [0, 31_414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=lowerCAmelCase__), [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2], )
    @slow
    def _A(self) -> List[str]:
        """Manually adding special tokens must match
        ``build_inputs_with_special_tokens`` for single and paired sequences."""
        # NOTE(review): locals are mangled to ``snake_case_``/``lowerCAmelCase__``
        # — the final asserts read ``encoded_sentence``/``encoded_pair`` etc.,
        # which are never bound. Restore real names and flags before running.
        snake_case_ : Tuple = self.tokenizer_class.from_pretrained("roberta-base")
        snake_case_ : List[str] = tokenizer.encode("sequence builders", add_special_tokens=lowerCAmelCase__)
        snake_case_ : List[Any] = tokenizer.encode("multi-sequence build", add_special_tokens=lowerCAmelCase__)
        snake_case_ : List[str] = tokenizer.encode(
            "sequence builders", add_special_tokens=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__)
        snake_case_ : Union[str, Any] = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__)
        snake_case_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__)
        snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
def _A ( self :List[Any] ) -> Any:
'''simple docstring'''
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Tuple = "Encode this sequence."
snake_case_ : Optional[Any] = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ )
snake_case_ : str = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Testing spaces after special tokens
snake_case_ : List[Any] = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )} ) # mask token has a left space
snake_case_ : str = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
snake_case_ : List[str] = "Encode <mask> sequence"
snake_case_ : List[Any] = "Encode <mask>sequence"
snake_case_ : Tuple = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : int = encoded.index(lowerCAmelCase__ )
snake_case_ : Optional[Any] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
snake_case_ : List[str] = tokenizer.encode(lowerCAmelCase__ )
snake_case_ : Union[str, Any] = encoded.index(lowerCAmelCase__ )
snake_case_ : int = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _A ( self :Tuple ) -> Tuple:
'''simple docstring'''
pass
def _A(self: int) -> Optional[Any]:
    """Slow and fast tokenizers must agree on a sentence containing `<mask>`."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            sentence = "A, <mask> AllenNLP sentence."
            tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
            tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

            # token_type_ids should put 0 everywhere
            self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

            # attention_mask should put 1 everywhere, so sum over length should be 1
            self.assertEqual(
                sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
            )

            tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
            tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

            # Rust correctly handles the space before the mask while python doesnt
            self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])
            self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2])

            self.assertSequenceEqual(
                tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
            )
            self.assertSequenceEqual(
                tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
            )
def _A(self: int) -> Tuple:
    """`add_prefix_space` / `trim_offsets` ctor args must reach the backend state."""
    for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
        tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
        )
        # The Rust backend serializes its components to JSON via __getstate__.
        pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
        post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())
        self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
        self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
        self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
def _A(self: List[str]) -> List[Any]:
    """Offset mappings must reflect the `add_prefix_space` / `trim_offsets` settings."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
            text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
            text = f"{text_of_1_token} {text_of_1_token}"

            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
            )

            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
            )

            # With trim_offsets=False the separating space belongs to the second token's span.
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
            )

            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
            )

            # Same checks with a leading space in the text.
            text = f" {text}"

            # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
            #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
            # )
            # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
            # self.assertEqual(
            #     encoding.offset_mapping[1],
            #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
            # )

            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
            )

            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
            )

            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
            )
            encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
            self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
            self.assertEqual(
                encoding.offset_mapping[1],
                (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
            )
| 653 | 1 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
# Module-level logger for this pipeline module (not otherwise used in this file).
__lowerCamelCase : Dict = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class A_(Pipeline):
    """
    Image-to-text pipeline: takes an image (optionally with a text prompt) and
    generates a caption with a vision-to-sequence model.

    The hook methods are named `_sanitize_parameters`, `preprocess`, `_forward`
    and `postprocess` because `Pipeline.__call__` dispatches to exactly those
    names.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Image loading requires the vision backend (PIL).
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        """Split pipeline kwargs into preprocess / forward / postprocess params."""
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                # Reject the ambiguous configuration instead of silently picking one.
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs) -> str:
        """Generate caption(s) for the given image(s)."""
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        """Load the image and build the model inputs (model-type specific)."""
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                # GIT expects the CLS token manually prepended to the prompt ids.
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        """Run generation on the preprocessed inputs."""
        # A batch of unconditional GIT inputs arrives as a list of Nones;
        # collapse it to a single None so `generate` treats it as no prompt.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        """Decode generated token ids into `{"generated_text": ...}` records."""
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 653 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if `positive_integer` is a perfect partition number.

    A number n is "perfect" here when log2(sqrt(4n + 1)/2 + 1/2) is integral,
    i.e. n has the form 4**k - 2**k.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition number at which the proportion of perfect
    partitions falls strictly below `max_proportion`."""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        # Candidate partition numbers have the form (i**2 - 1) / 4.
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
| 653 | 1 |
'''simple docstring'''
def __UpperCAmelCase ( __magic_name__ ,__magic_name__ )-> tuple[float, float]:
"""simple docstring"""
if not len(__magic_name__ ) == len(__magic_name__ ) == 3:
raise ValueError("Please enter a valid equation." )
if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0:
raise ValueError("Both a & b of two equations can't be zero." )
# Extract the coefficients
snake_case_, snake_case_, snake_case_ : Union[str, Any] = equationa
snake_case_, snake_case_, snake_case_ : int = equationa
# Calculate the determinants of the matrices
snake_case_ : Optional[int] = aa * ba - aa * ba
snake_case_ : Tuple = ca * ba - ca * ba
snake_case_ : Optional[Any] = aa * ca - aa * ca
# Check if the system of linear equations has a solution (using Cramer's rule)
if determinant == 0:
if determinant_x == determinant_y == 0:
raise ValueError("Infinite solutions. (Consistent system)" )
else:
raise ValueError("No solution. (Inconsistent system)" )
else:
if determinant_x == determinant_y == 0:
# Trivial solution (Inconsistent system)
return (0.0, 0.0)
else:
snake_case_ : Dict = determinant_x / determinant
snake_case_ : Dict = determinant_y / determinant
# Non-Trivial Solution (Consistent system)
return (x, y)
| 653 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
# Emit INFO-level logs so conversion progress is visible on the console.
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger()
@dataclass
class Tracker:
    """Records the leaf modules executed during one forward pass of `module`."""

    # Module whose execution is traced.
    module: nn.Module
    # Leaf modules in execution order, filled in by the forward hooks.
    traced: List[nn.Module] = field(default_factory=list)
    # Hook handles, kept so they can be removed once tracing is done.
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Record only leaves (modules with no submodules) plus conv/batch-norm layers.
        has_not_submodules = (
            len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        )
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        """Run `module` on `x` while collecting executed leaf modules; returns self."""
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        # Detach all hooks so later forward passes are untraced.
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # Keep only layers that actually hold parameters (drops activations etc.).
        return list(filter(lambda m: len(list(m.state_dict().keys())) > 0, self.traced))
@dataclass
class ModuleTransfer:
    """Copies the weights of `src` into `dest` by matching executed layers in order."""

    # Source module (weights are read from here).
    src: nn.Module
    # Destination module (weights are written here).
    dest: nn.Module
    # When 1, print each transferred layer pair.
    verbose: int = 0
    # Layer types to ignore on the source / destination side.
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        """Trace both modules with input `x` and copy state dicts layer-by-layer.

        Raises Exception when the two traces contain different numbers of
        parametrized operations (the architectures don't line up).
        """
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    """Copy weights from the timm checkpoint `name` into a HF ResNet and optionally push it.

    Args:
        name: timm model name (e.g. "resnet50"); the pretrained weights are downloaded.
        config: the matching HF `ResNetConfig`.
        save_directory: local directory used as the hub repo path prefix.
        push_to_hub: when True, upload model and image processor to the hub.
    """
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    # Sanity check: both models must produce identical logits on the same input.
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one (or all) supported timm ResNet checkpoints and optionally push them.

    Returns a `(config, expected_shape)` tuple for the last converted model.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    # Config factory pre-bound with the ImageNet label maps.
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        # Bind `config` explicitly so the final return works on this branch too.
        config = names_to_config[model_name]
        convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    # NOTE(review): `type=bool` treats any non-empty string as True (e.g.
    # `--push_to_hub False` is still truthy); kept for CLI backward compatibility.
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 653 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
# Module-level logger for this image-processor module.
__lowerCamelCase : Tuple = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a frame / video (list of frames) / batch of videos into List[List[frame]]."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        # Already a batch of videos.
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        # A single video: wrap into a batch of one.
        return [videos]
    elif is_valid_image(videos):
        # A single frame: wrap into a one-frame video in a batch of one.
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
class A_(BaseImageProcessor):
    r"""
    Video frame preprocessor: resizes, center-crops, rescales and normalizes
    every frame of every video in a batch, returning a `BatchFeature` with
    `pixel_values`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize a frame to `size` (either "shortest_edge" or explicit height/width)."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop a frame to exactly `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize a frame with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        """Apply the configured transforms to a single frame."""
        # NOTE(review): parenthesized — the unparenthesized original
        # (`do_resize and size is None or resample is None`) raised whenever
        # `resample` was None even with `do_resize=False`.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess a batch of videos; per-call args override the stored defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 653 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
__lowerCamelCase : List[Any] = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config file URL.
__lowerCamelCase : Dict = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class A_(PretrainedConfig):
    r"""
    Configuration class for a RoCBert model: the standard BERT hyper-parameters
    plus the pronunciation/shape embedding options specific to RoCBert.
    """

    # `model_type` is the key the auto classes use to route to this config.
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24_858,
        concat_input=True,
        **kwargs,
    ):
        """Store all hyper-parameters; `pad_token_id` is forwarded to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        # RoCBert-specific: auxiliary pronunciation / glyph-shape embeddings.
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 653 | 1 |
'''simple docstring'''
import sys
# The 1000-digit number from Project Euler problem 8, kept as a string so
# individual digits can be indexed directly.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits of *n*.

    Project Euler problem 8.  Slides a 13-digit window over the digit string
    and keeps the largest product seen.

    >>> solution("1111111111111")
    1
    """
    # Start below any possible product so the first window always wins.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
| 653 |
'''simple docstring'''
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Plain top-down recursion without memoization (exponential time); kept as
    the baseline the memoized/bottom-up variants below improve on.

    Args:
        rows: number of rows of ``mat``.
        cols: number of columns of ``mat``.
        mat: binary matrix (entries 0 or 1).
    """

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE: outside the matrix there is no square.
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            # A square anchored here extends the smallest of its three neighbours.
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    # One-element list so the nested function can mutate the running maximum.
    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Top-down recursion with a memo table (``dp_array``), O(rows * cols).

    Args:
        rows: number of rows of ``mat``.
        cols: number of columns of ``mat``.
        mat: binary matrix (entries 0 or 1).
    """

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        # -1 marks "not computed yet".
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            # NOTE: zero cells are not cached (dp stays -1), matching the
            # original behaviour; they may be revisited but stay O(1) each.
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Bottom-up dynamic programming with a full (rows+1) x (cols+1) table;
    the extra row/column of zeros removes boundary checks.
    O(rows * cols) time and space.

    Args:
        rows: number of rows of ``mat``.
        cols: number of columns of ``mat``.
        mat: binary matrix (entries 0 or 1).
    """
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]
            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0
    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(
    rows: int, cols: int, mat: list[list[int]]
) -> int:
    """Return the side length of the largest all-ones square in ``mat``.

    Space-optimized bottom-up DP keeping only two rows, O(cols) extra space.

    Args:
        rows: number of rows of ``mat``.
        cols: number of columns of ``mat``.
        mat: binary matrix (entries 0 or 1).
    """
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]       # same row, already computed (cols descend)
            diagonal = next_row[col + 1]       # row below
            bottom = next_row[col]             # row below
            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        # BUG FIX: snapshot the finished row with a copy.  The original
        # aliased the lists (`next_row = current_row`), so the next row's
        # `diagonal`/`bottom` reads saw values already overwritten in the
        # current pass and the answer could be overestimated
        # (e.g. 3 instead of 2 for [[1,1,1],[1,1,1],[1,1,0]]).
        next_row = current_row[:]
    return largest_square_area
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Smoke test: a 2x2 all-ones matrix contains a square of side 2.
    # NOTE(review): this call requires an implementation above to be named
    # `largest_square_area_in_matrix_bottom_up`; the mangled defs all share
    # the name `__UpperCAmelCase`, so restore the function names.
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
| 653 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A_ :
    """Test helper that builds ConvBERT configs and dummy inputs for the TF tests.

    NOTE(review): this block is machine-mangled and does not parse/run as-is:
    every ``__init__`` parameter is declared with the same name
    (``lowerCAmelCase__`` — duplicate parameter names are a SyntaxError), all
    methods share the name ``_A`` (each ``def`` overwrites the previous one),
    and the ``snake_case_ : T = value`` statements bind a throwaway local
    where the rest of the class reads ``self.<attr>`` (``self.parent``,
    ``self.batch_size``, ...).  Code is kept byte-identical; only
    documentation was added.  The original is the ConvBERT model-tester class
    from the transformers TF test suite — restore the real identifier names
    before relying on this class.
    """

    def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[str]=13 , lowerCAmelCase__ :List[str]=7 , lowerCAmelCase__ :Optional[int]=True , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Tuple=99 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :List[str]=2 , lowerCAmelCase__ :int=4 , lowerCAmelCase__ :Dict=37 , lowerCAmelCase__ :Dict="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :Optional[int]=0.1 , lowerCAmelCase__ :str=512 , lowerCAmelCase__ :Optional[int]=16 , lowerCAmelCase__ :List[Any]=2 , lowerCAmelCase__ :Optional[Any]=0.0_2 , lowerCAmelCase__ :Any=3 , lowerCAmelCase__ :Tuple=4 , lowerCAmelCase__ :int=None , ) -> int:
        """Record the test hyper-parameters (values are hard-coded; arguments ignored)."""
        snake_case_ : Optional[int] = parent
        snake_case_ : Union[str, Any] = 13
        snake_case_ : Union[str, Any] = 7
        snake_case_ : List[str] = True
        snake_case_ : Optional[int] = True
        snake_case_ : int = True
        snake_case_ : Any = True
        snake_case_ : List[Any] = 99
        snake_case_ : List[Any] = 384
        snake_case_ : Tuple = 2
        snake_case_ : Tuple = 4
        snake_case_ : List[Any] = 37
        snake_case_ : str = "gelu"
        snake_case_ : Optional[int] = 0.1
        snake_case_ : str = 0.1
        snake_case_ : Any = 512
        snake_case_ : Any = 16
        snake_case_ : List[str] = 2
        snake_case_ : List[Any] = 0.0_2
        snake_case_ : str = 3
        snake_case_ : Optional[Any] = 4
        snake_case_ : List[str] = 128
        snake_case_ : Dict = 2
        snake_case_ : Optional[int] = 9
        snake_case_ : Dict = 1
        snake_case_ : List[str] = None

    def _A ( self :int ) -> Union[str, Any]:
        """Build a ConvBertConfig plus random input ids, masks and label tensors."""
        snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case_ : int = None
        if self.use_input_mask:
            snake_case_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
        snake_case_ : Any = None
        if self.use_token_type_ids:
            snake_case_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        snake_case_ : Any = None
        snake_case_ : Tuple = None
        snake_case_ : List[Any] = None
        if self.use_labels:
            snake_case_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            snake_case_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            snake_case_ : Any = ids_tensor([self.batch_size] , self.num_choices )
        snake_case_ : Tuple = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCAmelCase__ , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _A ( self :List[str] , lowerCAmelCase__ :Any , lowerCAmelCase__ :int , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any ) -> Tuple:
        """Check the base TFConvBertModel output shape for dict and list inputs."""
        snake_case_ : int = TFConvBertModel(config=lowerCAmelCase__ )
        snake_case_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        snake_case_ : List[str] = [input_ids, input_mask]
        snake_case_ : Any = model(lowerCAmelCase__ )
        snake_case_ : List[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _A ( self :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :int ) -> Optional[int]:
        """Check TFConvBertForMaskedLM logits shape (batch, seq, vocab)."""
        snake_case_ : int = TFConvBertForMaskedLM(config=lowerCAmelCase__ )
        snake_case_ : Optional[int] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        snake_case_ : Dict = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _A ( self :List[str] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :List[Any] ) -> Tuple:
        """Check TFConvBertForSequenceClassification logits shape (batch, num_labels)."""
        snake_case_ : List[Any] = self.num_labels
        snake_case_ : Union[str, Any] = TFConvBertForSequenceClassification(config=lowerCAmelCase__ )
        snake_case_ : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        snake_case_ : str = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _A ( self :Tuple , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any] ) -> Optional[int]:
        """Check TFConvBertForMultipleChoice: inputs tiled per choice, logits (batch, num_choices)."""
        snake_case_ : Optional[int] = self.num_choices
        snake_case_ : Union[str, Any] = TFConvBertForMultipleChoice(config=lowerCAmelCase__ )
        snake_case_ : Optional[int] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : Optional[int] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : List[str] = tf.tile(tf.expand_dims(lowerCAmelCase__ , 1 ) , (1, self.num_choices, 1) )
        snake_case_ : Tuple = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        snake_case_ : Tuple = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _A ( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :int , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Tuple ) -> int:
        """Check TFConvBertForTokenClassification logits shape (batch, seq, num_labels)."""
        snake_case_ : Optional[Any] = self.num_labels
        snake_case_ : List[str] = TFConvBertForTokenClassification(config=lowerCAmelCase__ )
        snake_case_ : List[Any] = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        snake_case_ : List[str] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _A ( self :Optional[int] , lowerCAmelCase__ :Any , lowerCAmelCase__ :Any , lowerCAmelCase__ :str , lowerCAmelCase__ :int , lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :str ) -> str:
        """Check TFConvBertForQuestionAnswering start/end logits shapes (batch, seq)."""
        snake_case_ : List[Any] = TFConvBertForQuestionAnswering(config=lowerCAmelCase__ )
        snake_case_ : Tuple = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        snake_case_ : Optional[Any] = model(lowerCAmelCase__ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _A ( self :Dict ) -> str:
        """Adapt prepare_config_and_inputs() output to the common-test (config, inputs_dict) shape."""
        snake_case_ : List[Any] = self.prepare_config_and_inputs()
        # NOTE(review): annotated assignment to a tuple target below is also a
        # SyntaxError introduced by the mangling; originally a plain unpack of
        # config_and_inputs into seven named variables.
        (
            (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ), (
                snake_case_
            ),
        ) : Tuple = config_and_inputs
        snake_case_ : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A_ (a_ , a_ , unittest.TestCase ):
    """Common TF model tests for the ConvBERT family.

    NOTE(review): mangled source — the class attributes all share the name
    ``a__`` (each rebinding clobbers the previous one; originally
    ``all_model_classes``, ``pipeline_model_mapping`` and three boolean
    flags), the methods all share the name ``_A``, and ``snake_case_``
    assignments bind locals where later lines read the real names
    (``model``, ``outputs``, ``inputs_dict``, ...).  Code kept byte-identical;
    only documentation added.
    """

    a__ = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    a__ = (
        {
            '''feature-extraction''': TFConvBertModel,
            '''fill-mask''': TFConvBertForMaskedLM,
            '''question-answering''': TFConvBertForQuestionAnswering,
            '''text-classification''': TFConvBertForSequenceClassification,
            '''token-classification''': TFConvBertForTokenClassification,
            '''zero-shot''': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    a__ = False
    a__ = False
    a__ = False

    def _A ( self :Any ) -> Optional[Any]:
        """Set up the model tester and the config tester."""
        snake_case_ : Union[str, Any] = TFConvBertModelTester(self )
        snake_case_ : Dict = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )

    def _A ( self :Any ) -> int:
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()

    def _A ( self :Optional[Any] ) -> str:
        """Exercise the base model check."""
        snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCAmelCase__ )

    def _A ( self :Optional[Any] ) -> str:
        """Exercise the masked-LM head check."""
        snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase__ )

    def _A ( self :Any ) -> Tuple:
        """Exercise the multiple-choice head check."""
        snake_case_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase__ )

    def _A ( self :Any ) -> int:
        """Exercise the question-answering head check."""
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase__ )

    def _A ( self :Any ) -> str:
        """Exercise the sequence-classification head check."""
        snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase__ )

    def _A ( self :Tuple ) -> str:
        """Exercise the token-classification head check."""
        snake_case_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase__ )

    @slow
    def _A ( self :List[Any] ) -> Tuple:
        """Save each model as a TF SavedModel, reload it, and verify hidden-state
        and attention output shapes survive the round-trip."""
        snake_case_, snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : int = True
        snake_case_ : Dict = True
        if hasattr(lowerCAmelCase__ , "use_cache" ):
            snake_case_ : Optional[Any] = True
        snake_case_ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        snake_case_ : List[Any] = getattr(self.model_tester , "key_length" , lowerCAmelCase__ )
        for model_class in self.all_model_classes:
            snake_case_ : Any = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
            snake_case_ : Any = model_class(lowerCAmelCase__ )
            snake_case_ : Tuple = len(model(lowerCAmelCase__ ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(lowerCAmelCase__ , saved_model=lowerCAmelCase__ )
                snake_case_ : Dict = os.path.join(lowerCAmelCase__ , "saved_model" , "1" )
                snake_case_ : Tuple = tf.keras.models.load_model(lowerCAmelCase__ )
                snake_case_ : int = model(lowerCAmelCase__ )
                if self.is_encoder_decoder:
                    snake_case_ : Any = outputs["encoder_hidden_states"]
                    snake_case_ : Dict = outputs["encoder_attentions"]
                else:
                    snake_case_ : List[Any] = outputs["hidden_states"]
                    snake_case_ : Any = outputs["attentions"]
                self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
                snake_case_ : Tuple = getattr(
                    self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
                # NOTE(review): ConvBERT halves the attention heads via head
                # grouping, hence num_attention_heads / 2 in the expected shape.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

    @slow
    def _A ( self :Dict ) -> Tuple:
        """Smoke-test loading the pretrained YituTech/conv-bert-base checkpoint."""
        snake_case_ : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        self.assertIsNotNone(lowerCAmelCase__ )

    def _A ( self :List[str] ) -> Any:
        """Verify attention outputs: shapes, count, config/keyword toggling, and
        that attentions come last in the output tuple."""
        snake_case_, snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        snake_case_ : List[Any] = True
        snake_case_ : Any = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
        snake_case_ : Union[str, Any] = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
        snake_case_ : Dict = getattr(self.model_tester , "key_length" , lowerCAmelCase__ )
        snake_case_ : Dict = getattr(self.model_tester , "key_length" , lowerCAmelCase__ )

        def check_decoder_attentions_output(lowerCAmelCase__ :List[Any] ):
            snake_case_ : Optional[Any] = len(lowerCAmelCase__ )
            self.assertEqual(out_len % 2 , 0 )
            snake_case_ : List[str] = outputs.decoder_attentions
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )

        def check_encoder_attentions_output(lowerCAmelCase__ :int ):
            snake_case_ : List[str] = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(lowerCAmelCase__ ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )

        for model_class in self.all_model_classes:
            snake_case_ : Dict = True
            snake_case_ : Optional[int] = False
            snake_case_ : Union[str, Any] = model_class(lowerCAmelCase__ )
            snake_case_ : Union[str, Any] = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            snake_case_ : Union[str, Any] = len(lowerCAmelCase__ )
            self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
            check_encoder_attentions_output(lowerCAmelCase__ )
            if self.is_encoder_decoder:
                snake_case_ : int = model_class(lowerCAmelCase__ )
                snake_case_ : Dict = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
                self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
                check_decoder_attentions_output(lowerCAmelCase__ )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            snake_case_ : str = True
            snake_case_ : Dict = model_class(lowerCAmelCase__ )
            snake_case_ : Tuple = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            self.assertEqual(config.output_hidden_states , lowerCAmelCase__ )
            check_encoder_attentions_output(lowerCAmelCase__ )
            # Check attention is always last and order is fine
            snake_case_ : Any = True
            snake_case_ : Tuple = True
            snake_case_ : Optional[Any] = model_class(lowerCAmelCase__ )
            snake_case_ : Union[str, Any] = model(self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase__ ) )
            self.assertEqual(model.config.output_hidden_states , lowerCAmelCase__ )
            check_encoder_attentions_output(lowerCAmelCase__ )
@require_tf
class A_ (unittest.TestCase ):
    """Slow integration test against the real pretrained ConvBERT checkpoint.

    NOTE(review): mangled source — ``snake_case_`` assignments bind locals
    while later lines read ``model``/``output``/``lowerCAmelCase__``; code
    kept byte-identical, documentation only.
    """

    @slow
    def _A ( self :Union[str, Any] ) -> Optional[Any]:
        """Run YituTech/conv-bert-base on a tiny input; check the output shape
        and compare the top-left 3x3 slice to recorded golden values."""
        snake_case_ : Optional[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
        snake_case_ : Any = tf.constant([[0, 1, 2, 3, 4, 5]] )
        snake_case_ : Any = model(lowerCAmelCase__ )[0]
        snake_case_ : int = [1, 6, 768]
        self.assertEqual(output.shape , lowerCAmelCase__ )
        snake_case_ : List[Any] = tf.constant(
            [
                [
                    [-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
                    [0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
                    [0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4 )
| 653 |
'''simple docstring'''
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Return the latest scheduled runs of the daily CI workflow on `main`.

    Args:
        token: optional GitHub token; when given it is sent as a Bearer token.
        num_runs: how many of the most recent runs to fetch (default 7).

    Returns:
        The ``workflow_runs`` list from the GitHub REST API response.
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]
def get_last_daily_ci_runs(token):
    """Return the id of the most recent *completed* daily CI run, or None.

    Scans the runs newest-first and stops at the first one whose status is
    ``"completed"``.
    """
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id
def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run.

    Artifacts whose name is not available on that run are silently skipped.

    Args:
        artifact_names: artifact names to fetch.
        output_dir: directory the zip files are written to.
        token: optional GitHub token.
    """
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NOTE(review): `worflow_run_id` (sic) presumably matches the
        # parameter name declared in `get_artifacts_links` — verify against
        # get_ci_error_statistics before "fixing" the spelling here.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )
def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Fetch the last daily CI artifacts and return their decoded contents.

    Downloads the artifacts into ``output_dir`` first, then reads every
    member of each downloaded zip.

    Returns:
        Mapping ``{artifact_name: {member_filename: text}}``; artifacts whose
        zip was not downloaded are omitted.
    """
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
| 653 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config.json.
# NOTE(review): this rebinds the same mangled name ``__lowerCamelCase`` as
# the logger above, clobbering it — the two constants clearly had distinct
# names originally (a logger and a pretrained-config archive map).
__lowerCamelCase : List[Any] = {
    '''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class A_ (a_ ):
    """Configuration class for LXMERT models.

    Holds the hyper-parameters of the language, vision and cross-modality
    encoders plus the pre-training task switches; defaults reproduce the
    ``unc-nlp/lxmert-base-uncased`` architecture.

    NOTE(review): the mangled original declared every ``__init__`` parameter
    with the same name (a SyntaxError), bound the class attributes to one
    shared name, and assigned locals instead of ``self`` attributes; the
    names below are reconstructed from the assignment order and the
    documented LXMERT defaults.
    """

    # Identifier used by the auto-config machinery.
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9_500,
        num_object_labels=1_600,
        num_attr_labels=400,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,                     # language-encoder layers
        x_layers=5,                     # cross-modality layers
        r_layers=5,                     # vision (relation) layers
        visual_feat_dim=2_048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,              # pre-training task switches
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,           # components of the visual loss
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        """Store all hyper-parameters and forward remaining kwargs to the base config."""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Per-encoder layer counts, keyed the way the model code expects.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 653 |
'''simple docstring'''
from string import ascii_uppercase
# Forward map: letter -> alphabet index (A=0 ... Z=25).
dicta = {char: i for i, char in enumerate(ascii_uppercase)}
# Reverse map: alphabet index -> letter, to turn shift results back into text.
dictb = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat `key` cyclically until it is as long as `message`.

    >>> generate_key("THE GERMAN ATTACK", "SECRET")
    'SECRETSECRETSECRE'
    """
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt `message` with the length-matched key; spaces pass through.

    Each letter is shifted backwards by its key letter, modulo 26; the key
    index advances only on non-space characters.
    """
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dicta[letter] - dicta[key_new[i]]) % 26
            i += 1
            cipher_text += dictb[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt `cipher_text` back to the plaintext using the same key."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            # Inverse shift; the +26 keeps the sum non-negative before % 26.
            x = (dicta[letter] + dicta[key_new[i]] + 26) % 26
            i += 1
            or_txt += dictb[x]
    return or_txt


def main() -> None:
    """Demo: encrypt and decrypt a fixed message with the key 'SECRET'."""
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 653 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.