| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def UpperCamelCase__ ( lowercase__ : int ):
return getitem, k
def UpperCamelCase__ ( lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ):
return setitem, k, v
def UpperCamelCase__ ( lowercase__ : int ):
return delitem, k
def UpperCamelCase__ ( lowercase__ : Tuple , lowercase__ : Tuple , *lowercase__ : Union[str, Any] ):
try:
return fun(lowercase__ , *lowercase__ ), None
except Exception as e:
return None, e
__A = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__A = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__A = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__A = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__A = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__A = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def UpperCamelCase__ ( lowercase__ : Union[str, Any] ):
snake_case : Any = HashMap(initial_block_size=4 )
snake_case : int = {}
for _, (fun, *args) in enumerate(lowercase__ ):
snake_case , snake_case : Optional[Any] = _run_operation(lowercase__ , lowercase__ , *lowercase__ )
snake_case , snake_case : Union[str, Any] = _run_operation(lowercase__ , lowercase__ , *lowercase__ )
assert my_res == py_res
assert str(lowercase__ ) == str(lowercase__ )
assert set(lowercase__ ) == set(lowercase__ )
assert len(lowercase__ ) == len(lowercase__ )
assert set(my.items() ) == set(py.items() )
def UpperCamelCase__ ( ):
def is_public(lowercase__ : str ) -> bool:
return not name.startswith("_" )
snake_case : List[str] = {name for name in dir({} ) if is_public(lowercase__ )}
snake_case : Optional[int] = {name for name in dir(HashMap() ) if is_public(lowercase__ )}
assert dict_public_names > hash_public_names
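What the parametrized cases above assert, in miniature: apply the same operation sequence to a `HashMap` and to a plain `dict` and compare observable state. A minimal sketch, assuming the same `data_structures.hashing.hash_map.HashMap` import as the tests:

```python
# Smoke test mirroring the parametrized cases above: the HashMap is expected
# to behave exactly like a built-in dict for the same operation sequence.
from operator import delitem, setitem

from data_structures.hashing.hash_map import HashMap

my, py = HashMap(initial_block_size=4), {}
for fun, *args in [(setitem, "key_a", "val_a"), (delitem, "key_a")]:
    fun(my, *args)
    fun(py, *args)
    assert set(my.items()) == set(py.items())
```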
| 134
|
"""simple docstring"""
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = {
"facebook/mask2former-swin-small-coco-instance": (
"https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
__A = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCamelCase_ ):
a__ : Tuple = """mask2former"""
a__ : List[str] = ["""swin"""]
a__ : Any = {"""hidden_size""": """hidden_dim"""}
def __init__( self , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 256 , SCREAMING_SNAKE_CASE = 1_024 , SCREAMING_SNAKE_CASE = "relu" , SCREAMING_SNAKE_CASE = 6 , SCREAMING_SNAKE_CASE = 10 , SCREAMING_SNAKE_CASE = 8 , SCREAMING_SNAKE_CASE = 0.0 , SCREAMING_SNAKE_CASE = 2_048 , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = 4 , SCREAMING_SNAKE_CASE = 255 , SCREAMING_SNAKE_CASE = 100 , SCREAMING_SNAKE_CASE = 0.1 , SCREAMING_SNAKE_CASE = 2.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 5.0 , SCREAMING_SNAKE_CASE = 12_544 , SCREAMING_SNAKE_CASE = 3.0 , SCREAMING_SNAKE_CASE = 0.75 , SCREAMING_SNAKE_CASE = 0.02 , SCREAMING_SNAKE_CASE = 1.0 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = [4, 8, 16, 32] , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone." )
snake_case : Union[str, Any] = CONFIG_MAPPING["swin"](
image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=SCREAMING_SNAKE_CASE , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
snake_case : Union[str, Any] = backbone_config.pop("model_type" )
snake_case : Dict = CONFIG_MAPPING[backbone_model_type]
snake_case : int = config_class.from_dict(SCREAMING_SNAKE_CASE )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '''
F'''Supported model types: {','.join(self.backbones_supported )}''' )
snake_case : Any = backbone_config
snake_case : List[Any] = feature_size
snake_case : str = mask_feature_size
snake_case : Any = hidden_dim
snake_case : Optional[Any] = encoder_feedforward_dim
snake_case : Any = activation_function
snake_case : Optional[Any] = encoder_layers
snake_case : str = decoder_layers
snake_case : Union[str, Any] = num_attention_heads
snake_case : str = dropout
snake_case : Optional[Any] = dim_feedforward
snake_case : Optional[int] = pre_norm
snake_case : Optional[int] = enforce_input_projection
snake_case : Any = common_stride
snake_case : Optional[int] = ignore_value
snake_case : int = num_queries
snake_case : Optional[int] = no_object_weight
snake_case : Optional[Any] = class_weight
snake_case : int = mask_weight
snake_case : Dict = dice_weight
snake_case : int = train_num_points
snake_case : str = oversample_ratio
snake_case : List[Any] = importance_sample_ratio
snake_case : Any = init_std
snake_case : List[str] = init_xavier_std
snake_case : int = use_auxiliary_loss
snake_case : str = feature_strides
snake_case : List[Any] = output_auxiliary_logits
snake_case : Any = decoder_layers
super().__init__(**SCREAMING_SNAKE_CASE )
@classmethod
def lowerCamelCase_ ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return cls(
backbone_config=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = copy.deepcopy(self.__dict__ )
snake_case : str = self.backbone_config.to_dict()
snake_case : Optional[Any] = self.__class__.model_type
return output
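For orientation, a sketch of how this config is typically used; it assumes the `Mask2FormerConfig` class defined above and a transformers installation that provides the Swin entry in `CONFIG_MAPPING`:

```python
# Default construction builds a Swin backbone config internally.
config = Mask2FormerConfig()
print(config.hidden_dim, config.num_queries)  # 256 100

# from_backbone_config wraps an existing backbone config object.
derived = Mask2FormerConfig.from_backbone_config(config.backbone_config, num_queries=200)
```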
| 134
| 1
|
import operator as op


SCALER_NAME = "scaler.pt"
MODEL_NAME = "pytorch_model"
RNG_STATE_NAME = "random_states"
OPTIMIZER_NAME = "optimizer"
SCHEDULER_NAME = "scheduler"
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
SAGEMAKER_PYTORCH_VERSION = "1.10.2"
SAGEMAKER_PYTHON_VERSION = "py38"
SAGEMAKER_TRANSFORMERS_VERSION = "4.17.0"
SAGEMAKER_PARALLEL_EC2_INSTANCES = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
FSDP_SHARDING_STRATEGY = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
FSDP_AUTO_WRAP_POLICY = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
FSDP_BACKWARD_PREFETCH = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
FSDP_STATE_DICT_TYPE = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
FSDP_PYTORCH_VERSION = "2.0.1"
DEEPSPEED_MULTINODE_LAUNCHERS = ["pdsh", "standard", "openmpi", "mvapich"]
TORCH_DYNAMO_MODES = ["default", "reduce-overhead", "max-autotune"]

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

# These are the args for `torch.distributed.launch` for pytorch < 1.9
TORCH_LAUNCH_PARAMS = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]

CUDA_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
XPU_DISTRIBUTED_TYPES = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
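The `STR_OPERATION_TO_FUNC` table turns string operators into callables, which is how version guards written as strings become executable; a minimal sketch (the `compare_versions` helper name is illustrative, not necessarily the library's own API):

```python
# Hypothetical helper: evaluate "version_a <op> version_b" using the table above.
from packaging.version import parse


def compare_versions(version_a: str, operation: str, version_b: str) -> bool:
    return STR_OPERATION_TO_FUNC[operation](parse(version_a), parse(version_b))


assert compare_versions("2.0.1", ">=", "1.10.2")
```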
| 61
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
    "tokenization_biogpt": ["BioGptTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_biogpt"] = [
        "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BioGptForCausalLM",
        "BioGptForTokenClassification",
        "BioGptForSequenceClassification",
        "BioGptModel",
        "BioGptPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
    from .tokenization_biogpt import BioGptTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_biogpt import (
            BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BioGptForCausalLM,
            BioGptForSequenceClassification,
            BioGptForTokenClassification,
            BioGptModel,
            BioGptPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
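The effect of the `_LazyModule` registration, seen from user code: importing the package is cheap, and the torch-backed modeling module is only imported when one of its attributes is first touched. A sketch, assuming an installed `transformers` with this package layout:

```python
# Cheap: only the config module is imported eagerly.
from transformers.models.biogpt import BioGptConfig

config = BioGptConfig()

# This access triggers the deferred import of modeling_biogpt (and torch).
from transformers.models.biogpt import BioGptModel

model = BioGptModel(config)
```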
| 61
| 1
|
import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 14
|
import inspect
import unittest

from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
    from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))


@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
| 284
| 0
|
import json
import os
from typing import Dict, List, Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))

                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1

                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)

        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
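A round-trip sketch for the tokenizer above; it assumes network access to fetch the pretrained vocab and merges files:

```python
from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
ids = tok("hello there!").input_ids
print(tok.convert_ids_to_tokens(ids))  # subword pieces carry "@@" continuation markers
print(tok.decode(ids))                 # convert_tokens_to_string strips "@@ " again
```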
| 298
|
import inspect
import re
from hashlib import sha256
from typing import Dict, List

from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql  # noqa F401
from .text import text


def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()


# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}

# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})

_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}

# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)

_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 298
| 1
|
from __future__ import annotations

from functools import lru_cache
from math import ceil

NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    # Each prime partition of number_to_partition is encoded as the product
    # of its primes; the returned set therefore counts distinct partitions.
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
| 578
|
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
| 1
|
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                special_token_codepoint = 0xE005
                special_token = chr(special_token_codepoint)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                new_token_codepoint = 0xE006
                new_token = chr(new_token_codepoint)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                new_token_codepoint = 0xE006
                new_token_1 = chr(new_token_codepoint)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                new_token_codepoint = 0xE007
                new_token_2 = chr(new_token_codepoint)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as each character has its own Unicode code point
    # ("b" and "B" for example have different Unicode code points)
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
| 265
|
from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
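A usage sketch for the pipeline above. It assumes a checkpoint whose `model_index.json` exposes exactly the `unet` and `scheduler` components this class expects; the repo id and file names are illustrative:

```python
from PIL import Image

pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-celebahq-256")
init_image = Image.open("face.png")  # any RGB image compatible with the model resolution

# strength picks the noising timestep; with return_dict=False it is also returned
images, noising_timestep = pipe(init_image, strength=0.5, return_dict=False)
images[0].save("renoised.png")
```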
| 265
| 1
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
lowerCAmelCase_ = {
'''gwf-440k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/gwf-440k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-small-190k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 6_55_36,
},
'''jmann-large-580k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt''',
'''sample_rate''': 4_80_00,
'''sample_size''': 13_10_72,
},
'''maestro-uncond-150k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''unlocked-uncond-250k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
'''honk-140k''': {
'''url''': '''https://model-server.zqevans2.workers.dev/honk-140k.ckpt''',
'''sample_rate''': 1_60_00,
'''sample_size''': 6_55_36,
},
}
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return torch.atana(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) / math.pi * 2
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = torch.sin(t * math.pi / 2 ) ** 2
snake_case_ = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
class snake_case_ ( __A ):
'''simple docstring'''
pass
class snake_case_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCamelCase : int ) ->Optional[int]:
super().__init__()
snake_case_ = DiffusionAttnUnetaD(_UpperCamelCase , n_attn_layers=4 )
snake_case_ = deepcopy(self.diffusion )
snake_case_ = torch.quasirandom.SobolEngine(1 , scramble=_UpperCamelCase )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = MODELS_MAP[model_name]['''url''']
os.system(F'''wget {url} ./''' )
return F'''./{model_name}.ckpt'''
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''8''': '''resnets.0''',
'''9''': '''attentions.0''',
'''10''': '''resnets.1''',
'''11''': '''attentions.1''',
'''12''': '''resnets.2''',
'''13''': '''attentions.2''',
}
lowerCAmelCase_ = {
'''1''': '''resnets.0''',
'''2''': '''attentions.0''',
'''3''': '''resnets.1''',
'''4''': '''attentions.1''',
'''5''': '''resnets.2''',
'''6''': '''attentions.2''',
'''8''': '''resnets.3''',
'''9''': '''attentions.3''',
'''10''': '''resnets.4''',
'''11''': '''attentions.4''',
'''12''': '''resnets.5''',
'''13''': '''attentions.5''',
}
lowerCAmelCase_ = {
'''0''': '''resnets.0''',
'''1''': '''resnets.1''',
'''2''': '''resnets.2''',
'''4''': '''resnets.0''',
'''5''': '''resnets.1''',
'''6''': '''resnets.2''',
}
lowerCAmelCase_ = {
'''skip''': '''conv_skip''',
'''main.0''': '''conv_1''',
'''main.1''': '''group_norm_1''',
'''main.3''': '''conv_2''',
'''main.4''': '''group_norm_2''',
}
lowerCAmelCase_ = {
'''norm''': '''group_norm''',
'''qkv_proj''': ['''query''', '''key''', '''value'''],
'''out_proj''': ['''proj_attn'''],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
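# Illustrative traces (added; derived from the maps above, not in the original):
#   rename("timestep_embed.weight")   -> "time_proj.weight"
#   rename("net.3.1.main.0.weight")   -> "down_blocks.1.resnets.0.conv_1.weight"
#   rename("net.3.2.qkv_proj.weight") -> ["down_blocks.1.attentions.0.query.weight",
#                                          "down_blocks.1.attentions.0.key.weight",
#                                          "down_blocks.1.attentions.0.value.weight"]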
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue

        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v

    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
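# Illustrative (added for clarity): the original checkpoint stores attention
# projections as Conv1d tensors. A fused qkv weight of shape (3 * C, C, 1) is
# sliced into three (C, C) Linear weights (query/key/value); a fused bias of
# shape (3 * C,) is sliced into three (C,) vectors.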
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase_ = parser.parse_args()
main(args)
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent=None):
    """Format a user-agent string with basic info about the current environment."""
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
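# Illustrative result (added; exact versions depend on the environment), e.g.:
#   http_user_agent({"file_type": "model"})
#   -> "diffusers/<version>; python/<version>; session_id/<hex>; torch/<version>; file_type/model"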
def get_full_repo_name(model_id, organization=None, token=None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en",
            license="apache-2.0",
            library_name="diffusers",
            tags=[],
            datasets=args.dataset_name,
            metrics=[],
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH,
        model_name=model_name,
        repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(
            args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None
        ),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file, commit_hash=None):
    """Extract the commit hash from a resolved filename toward a cache file."""
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
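# Illustrative (added): a file resolved inside the hub cache looks like
#   ".../models--<org>--<name>/snapshots/<commit sha>/unet/config.json"
# and the regex above recovers "<commit sha>"; any match that is not a valid
# commit hash yields None instead.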
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir=None, new_cache_dir=None):
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
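# Illustrative (added): move_cache() relocates every blob found under
# "<hf_cache_home>/diffusers/**/blobs/*" to the same relative path under
# DIFFUSERS_CACHE, leaving a symlink behind so older diffusers versions that
# still read the old location keep working.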
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            "The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your "
            "existing cached models. This is a one-time operation, you can interrupt it or run it "
            "later by calling `diffusers.utils.hub_utils.move_cache()`."
        )
        try:
            move_cache()
        except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
            logger.error(
                f"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                "file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole "
                "message and we will do our best to help."
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, "w") as f:
            f.write("1")
    except Exception:
        logger.warning(
            f"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            "the directory exists and can be written to."
        )
def _add_variant(weights_name, variant=None):
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
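# Illustrative (added): the variant is spliced in before the file extension:
#   _add_variant("diffusion_pytorch_model.bin", "fp16")
#   -> "diffusion_pytorch_model.fp16.bin"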
def _get_model_file(
    pretrained_model_name_or_path,
    *,
    weights_name,
    subfolder,
    cache_dir,
    force_download,
    proxies,
    resume_download,
    local_files_only,
    use_auth_token,
    user_agent,
    revision,
    commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path,
                    filename=_add_variant(weights_name, revision),
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                    subfolder=subfolder,
                    revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
                    FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.",
                    FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path,
                filename=weights_name,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                subfolder=subfolder,
                revision=revision or commit_hash,
            )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                "login`."
            )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                "this model name. Check the model page at "
                f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
            )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {weights_name}."
            )
        except HTTPError as err:
            raise EnvironmentError(
                f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}"
            )
        except ValueError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"
                f" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"
                f" directory containing a file named {weights_name} or"
                " \nCheckout your internet connection or see how to run the library in"
                " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'."
            )
        except EnvironmentError:
            raise EnvironmentError(
                f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                f"containing a file named {weights_name}"
            )
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
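    # Illustrative invocation (added; the script filename is a placeholder):
    #   python convert_unclip_txt2img_to_image_variation.py \
    #       --txt2img_unclip kakaobrain/karlo-v1-alpha \
    #       --dump_path ./karlo-image-variation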
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case : Optional[int] = "▁"
_snake_case : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class a (_lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = BertGenerationTokenizer
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[Any] = True
def __snake_case ( self : Optional[int] ) -> Optional[int]:
super().setUp()
__snake_case : Tuple = BertGenerationTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Dict ) -> int:
__snake_case : str = "<s>"
__snake_case : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) , lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) , lowerCamelCase )
def __snake_case ( self : str ) -> Optional[Any]:
__snake_case : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(lowerCamelCase ) , 1002 )
def __snake_case ( self : List[str] ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def __snake_case ( self : List[str] ) -> Union[str, Any]:
__snake_case : List[Any] = BertGenerationTokenizer(lowerCamelCase , keep_accents=lowerCamelCase )
__snake_case : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) , [285, 46, 10, 170, 382] , )
__snake_case : str = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCamelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [18536, 2260, 101]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
34324,
497,
391,
408,
11342,
1244,
385,
100,
938,
985,
456,
574,
362,
12597,
3200,
3129,
1172,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import BertGenerationConfig, BertGenerationEncoder

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False)
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus(
            [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False
        )

        config = BertGenerationConfig()
        model = BertGenerationEncoder(config)

        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size

        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)
@slow
def __snake_case ( self : List[Any] ) -> List[Any]:
# fmt: off
__snake_case : Optional[Any] = {"input_ids": [[39286, 458, 36335, 2001, 456, 13073, 13266, 455, 113, 7746, 1741, 11157, 391, 13073, 13266, 455, 113, 3967, 35412, 113, 4936, 109, 3870, 2377, 113, 30084, 45720, 458, 134, 17496, 112, 503, 11672, 113, 118, 112, 5665, 13347, 38687, 112, 1496, 31389, 112, 3268, 47264, 134, 962, 112, 16377, 8035, 23130, 430, 12169, 15518, 28592, 458, 146, 41697, 109, 391, 12169, 15518, 16689, 458, 146, 41358, 109, 452, 726, 4034, 111, 763, 35412, 5082, 388, 1903, 111, 9051, 391, 2870, 48918, 1900, 1123, 550, 998, 112, 9586, 15985, 455, 391, 410, 22955, 37636, 114], [448, 17496, 419, 3663, 385, 763, 113, 27533, 2870, 3283, 13043, 1639, 24713, 523, 656, 24013, 18550, 2521, 517, 27014, 21244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 11786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 21932, 18146, 726, 363, 17032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element


def create_accelerator(even_batches=True):
    accelerator = Accelerator(even_batches=even_batches)
    assert accelerator.num_processes == 2, "this script expects that two GPUs are available"

    return accelerator


def create_dataloader(accelerator: Accelerator, dataset_size: int, batch_size: int, iterable: bool = False):
    """Create a simple DataLoader to use during the tests."""
    if iterable:
        dataset = DummyIterableDataset(torch.as_tensor(range(dataset_size)))
    else:
        dataset = TensorDataset(torch.as_tensor(range(dataset_size)))

    dl = DataLoader(dataset, batch_size=batch_size)
    dl = accelerator.prepare(dl)

    return dl
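# Illustrative (added): with the default even_batches=True, Accelerator pads the
# sampler so every process sees the same number of samples. For dataset_size=3,
# batch_size=1 on 2 processes, each process gets batches of sizes [1, 1] (one
# sample is duplicated); with even_batches=False, process 1 would get only [1].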
def verify_dataloader_batch_sizes(
    accelerator: Accelerator,
    dataset_size: int,
    batch_size: int,
    process_0_expected_batch_sizes: List[int],
    process_1_expected_batch_sizes: List[int],
):
    """Verify the batch sizes coming from a prepared dataloader in each process."""
    dl = create_dataloader(accelerator=accelerator, dataset_size=dataset_size, batch_size=batch_size)

    batch_sizes = [len(batch[0]) for batch in dl]

    if accelerator.process_index == 0:
        assert batch_sizes == process_0_expected_batch_sizes
    elif accelerator.process_index == 1:
        assert batch_sizes == process_1_expected_batch_sizes


def test_default_ensures_even_batch_sizes():
    accelerator = create_accelerator()

    # without padding, we would expect a different number of batches
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1, 1],
    )

    # without padding, we would expect the same number of batches, but different sizes
    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 2],
    )


def test_can_disable_even_batches():
    accelerator = create_accelerator(even_batches=False)

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=3,
        batch_size=1,
        process_0_expected_batch_sizes=[1, 1],
        process_1_expected_batch_sizes=[1],
    )

    verify_dataloader_batch_sizes(
        accelerator,
        dataset_size=7,
        batch_size=2,
        process_0_expected_batch_sizes=[2, 2],
        process_1_expected_batch_sizes=[2, 1],
    )


def test_can_join_uneven_inputs():
    accelerator = create_accelerator(even_batches=False)

    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)

    dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    batch_idxs = []
    with accelerator.join_uneven_inputs([ddp_model]):
        for batch_idx, batch in enumerate(dl):
            output = ddp_model(batch[0].float())
            loss = output.sum()
            loss.backward()
            batch_idxs.append(batch_idx)

    accelerator.wait_for_everyone()

    if accelerator.process_index == 0:
        assert batch_idxs == [0, 1]
    elif accelerator.process_index == 1:
        assert batch_idxs == [0]


def test_join_raises_warning_for_non_ddp_distributed(accelerator: Accelerator):
    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([Mock()]):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for multi-GPU" in str(w[-1].message)


def test_join_can_override_even_batches():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    train_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)
    valid_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
        train_dl_overridden_value = train_dl.batch_sampler.even_batches
        valid_dl_overridden_value = valid_dl.batch_sampler.even_batches

    assert train_dl_overridden_value == overridden_even_batches
    assert valid_dl_overridden_value == overridden_even_batches
    assert train_dl.batch_sampler.even_batches == default_even_batches
    assert valid_dl.batch_sampler.even_batches == default_even_batches


def test_join_can_override_for_mixed_type_dataloaders():
    default_even_batches = True
    overridden_even_batches = False
    accelerator = create_accelerator(even_batches=default_even_batches)
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)
    batch_dl = create_dataloader(accelerator, dataset_size=3, batch_size=1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        try:
            with accelerator.join_uneven_inputs([ddp_model], even_batches=overridden_even_batches):
                batch_dl_overridden_value = batch_dl.batch_sampler.even_batches
        except AttributeError:
            # ensure attribute error is not raised when processing iterable dl
            raise AssertionError

    assert batch_dl_overridden_value == overridden_even_batches
    assert batch_dl.batch_sampler.even_batches == default_even_batches


def test_join_raises_warning_for_iterable_when_overriding_even_batches():
    accelerator = create_accelerator()
    model = torch.nn.Linear(1, 1)
    ddp_model = accelerator.prepare(model)
    create_dataloader(accelerator, dataset_size=3, batch_size=1, iterable=True)

    with warnings.catch_warnings(record=True) as w:
        with accelerator.join_uneven_inputs([ddp_model], even_batches=False):
            pass

        assert issubclass(w[-1].category, UserWarning)
        assert "only supported for map-style datasets" in str(w[-1].message)


def main():
    accelerator = create_accelerator()

    accelerator.print("Test that even_batches variable ensures uniform batches across processes")
    test_default_ensures_even_batch_sizes()

    accelerator.print("Run tests with even_batches disabled")
    test_can_disable_even_batches()

    accelerator.print("Test joining uneven inputs")
    test_can_join_uneven_inputs()

    accelerator.print("Test overriding even_batches when joining uneven inputs")
    test_join_can_override_even_batches()

    accelerator.print("Test overriding even_batches for mixed dataloader types")
    test_join_can_override_for_mixed_type_dataloaders()

    accelerator.print("Test overriding even_batches raises a warning for iterable dataloaders")
    test_join_raises_warning_for_iterable_when_overriding_even_batches()

    accelerator.print("Test join with non DDP distributed raises warning")
    original_state = accelerator.state.distributed_type
    accelerator.state.distributed_type = DistributedType.FSDP
    test_join_raises_warning_for_non_ddp_distributed(accelerator)
    accelerator.state.distributed_type = original_state
if __name__ == "__main__":
main()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
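# Illustrative (added): with the _LazyModule above, importing the package is
# cheap; the torch-backed classes are only resolved on first attribute access:
#   from transformers.models.gpt_bigcode import GPTBigCodeConfig  # config only, no torch needed
#   from transformers.models.gpt_bigcode import GPTBigCodeModel   # triggers the torch-gated import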
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images (channels moved last for PIL)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    """Construct a BertGeneration tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sep_token="<::::>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sep_token=sep_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) into an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) into a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
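# Illustrative usage (added; the checkpoint name comes from
# PRETRAINED_VOCAB_FILES_MAP above):
#   tokenizer = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder"
#   )
#   tokenizer.tokenize("This is a test")  # -> SentencePiece sub-word tokens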
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
    import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset

    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever

    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever

    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def __snake_case ( self ):
UpperCAmelCase__ : List[str] = 1
UpperCAmelCase__ : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = retriever.retrieve(UpperCamelCase_ , n_docs=UpperCamelCase_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCamelCase_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
self.assertEqual(len(doc_dicts[0]['id'] ) , UpperCamelCase_ )
self.assertEqual(doc_dicts[0]['id'][0] , '1' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['id'][0] , '0' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained( self ):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch('transformers.models.rag.retrieval_rag.load_dataset' ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
                self.assertIsInstance(retriever , RagRetriever )
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
                out = retriever.retrieve(hidden_states , n_docs=1 )
                self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve( self ):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained( self ):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk( self ):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['embeddings', 'id', 'text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['id'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['id'][0] , '1' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['id'][0] , '0' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk( self ):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve( self ):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds , doc_ids , doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ['text', 'title'] )
        self.assertEqual(len(doc_dicts[0]['text'] ) , n_docs )
        self.assertEqual(doc_dicts[0]['text'][0] , 'bar' )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]['text'][0] , 'foo' )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_index_retriever_save_and_from_pretrained( self ):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_canonical_hf_index_retriever_call( self ):
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids , context_attention_mask , retrieved_doc_embeds = (
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )
        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors='pt' , )
        context_input_ids , context_attention_mask , retrieved_doc_embeds , doc_ids = (  # noqa: F841
            out['context_input_ids'],
            out['context_attention_mask'],
            out['retrieved_doc_embeds'],
            out['doc_ids'],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_retriever_call_with_ctx_encoder( self ):
        dpr_ctx_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        retriever.set_ctx_encoder_tokenizer(dpr_ctx_encoder_tokenizer )
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        self.assertEqual(
            len(out ) , 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ('tokenized_doc_ids', 'tokenized_doc_attention_mask') ) , True )  # check for doc token related keys in dictionary.
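# --- Editor's note: illustrative sketch, not part of the original test file. ---
# RagRetriever.retrieve() returns (retrieved_doc_embeds, doc_ids, doc_dicts); for a
# batch of 2 question hidden states and n_docs=1 that is an embedding array of shape
# (2, 1, retrieval_vector_size), ids of shape (2, 1), and one doc dict per question.
# A minimal standalone check, assuming `retriever` is built like the fixtures above:
#
#   import numpy as np
#   hidden_states = np.ones((2, retriever.config.retrieval_vector_size), dtype=np.float32)
#   retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=1)
#   assert retrieved_doc_embeds.shape == (2, 1, retriever.config.retrieval_vector_size)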
| 110
|
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
UpperCamelCase__ = logging.getLogger(__name__)
class a :
def __init__( self ):
UpperCAmelCase__ : Union[str, Any] = False
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if not self.initialized:
UpperCAmelCase__ : Optional[Any] = RagRetriever(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
UpperCAmelCase__ : Union[str, Any] = True
def __snake_case ( self ):
self.retriever.index.init_index()
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.retriever._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return doc_ids, retrieved_doc_embeds
class a ( lowercase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None ):
if index is not None and index.is_initialized() and len(UpperCamelCase_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , )
UpperCAmelCase__ : Union[str, Any] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
for worker in self.retrieval_workers
] )
def __snake_case ( self ):
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ ):
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
UpperCAmelCase__ : Optional[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = ray.get(random_worker.retrieve.remote(UpperCamelCase_ , UpperCamelCase_ ) )
else:
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ )
@classmethod
def __snake_case ( cls , UpperCamelCase_ , UpperCamelCase_=None , **UpperCamelCase_ ):
return super(UpperCamelCase_ , cls ).get_tokenizers(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
@classmethod
def __snake_case ( cls , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , **UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = kwargs.pop('config' , UpperCamelCase_ ) or RagConfig.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
UpperCAmelCase__ : Union[str, Any] = RagTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = rag_tokenizer.question_encoder
UpperCAmelCase__ : Optional[Any] = rag_tokenizer.generator
if indexed_dataset is not None:
UpperCAmelCase__ : List[str] = 'custom'
UpperCAmelCase__ : Tuple = CustomHFIndex(config.retrieval_vector_size , UpperCamelCase_ )
else:
UpperCAmelCase__ : Optional[Any] = cls._build_index(UpperCamelCase_ )
return cls(
UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , retrieval_workers=UpperCamelCase_ , index=UpperCamelCase_ , )
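# --- Editor's note: hedged usage sketch, not part of the original file. ---
# `retrieval_workers` is expected to be a list of Ray actor handles wrapping
# RayRetriever. A minimal construction (worker count and model id here are
# assumptions for illustration):
#
#   ray.init(ignore_reinit_error=True)
#   RemoteRetriever = ray.remote(RayRetriever)
#   workers = [RemoteRetriever.remote() for _ in range(2)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#   retriever.init_retrieval()  # each actor loads the index once, in parallel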
| 110
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self ):
        vocab = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split()
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.add_kwargs_tokens_map = {
            '''unk_token''': '''<unk>''',
            '''bos_token''': '''<s>''',
            '''eos_token''': '''</s>''',
        }
        feature_extractor_map = {
            '''feature_size''': 1,
            '''padding_value''': 0.0,
            '''sampling_rate''': 16000,
            '''return_attention_mask''': False,
            '''do_normalize''': True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.feature_extraction_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(feature_extractor_map ) + '''\n''' )
        # load decoder from hub
        self.decoder_name = '''hf-internal-testing/ngram-beam-search-decoder'''
    def get_tokenizer(self , **kwargs_init ):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init )
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_feature_extractor(self , **kwargs ):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **kwargs )

    def get_decoder(self , **kwargs ):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **kwargs )

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : Dict = self.get_feature_extractor()
_lowerCamelCase : Any = self.get_decoder()
_lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , _lowercase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , _lowercase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _lowercase )
def a__ ( self ) -> Dict:
_lowerCamelCase : List[Any] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def a__ ( self ) -> str:
_lowerCamelCase : Dict = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['''xx'''] )
with self.assertRaisesRegex(_lowercase , '''include''' ):
WavaVecaProcessorWithLM(
tokenizer=_lowercase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def a__ ( self ) -> List[str]:
_lowerCamelCase : List[str] = self.get_feature_extractor()
_lowerCamelCase : Union[str, Any] = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_decoder()
_lowerCamelCase : Dict = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
_lowerCamelCase : str = floats_list((3, 1000) )
_lowerCamelCase : Union[str, Any] = feature_extractor(_lowercase , return_tensors='''np''' )
_lowerCamelCase : Union[str, Any] = processor(_lowercase , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def a__ ( self ) -> List[Any]:
_lowerCamelCase : Optional[Any] = self.get_feature_extractor()
_lowerCamelCase : Tuple = self.get_tokenizer()
_lowerCamelCase : Dict = self.get_decoder()
_lowerCamelCase : int = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
_lowerCamelCase : List[Any] = '''This is a test string'''
_lowerCamelCase : Tuple = processor(text=_lowercase )
_lowerCamelCase : List[Any] = tokenizer(_lowercase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _get_dummy_logits(self , shape=(2, 10, 16) , seed=77 ):
        np.random.seed(seed )
        return np.random.rand(*shape )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : Optional[Any] = self.get_feature_extractor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : List[str] = self.get_decoder()
_lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
_lowerCamelCase : List[str] = self._get_dummy_logits(shape=(10, 16) , seed=13 )
_lowerCamelCase : Optional[Any] = processor.decode(_lowercase )
_lowerCamelCase : Union[str, Any] = decoder.decode_beams(_lowercase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('''</s> <s> </s>''' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['''fork'''], ['''spawn''']] )
    def test_decoder_batch(self , pool_context ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer , feature_extractor=feature_extractor , decoder=decoder )
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits )
        else:
            with get_context(pool_context ).Pool() as pool:
                decoded_processor = processor.batch_decode(logits , pool )
        logits_list = list(logits )
        with get_context('''fork''' ).Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p , logits_list )
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0] )
            logit_scores_decoder.append(beams[0][-2] )
            lm_scores_decoder.append(beams[0][-1] )
        self.assertListEqual(texts_decoder , decoded_processor.text )
        self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text )
        self.assertListEqual(logit_scores_decoder , decoded_processor.logit_score )
        self.assertListEqual(lm_scores_decoder , decoded_processor.lm_score )
def a__ ( self ) -> Any:
_lowerCamelCase : Any = self.get_feature_extractor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : int = self.get_decoder()
_lowerCamelCase : Any = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
_lowerCamelCase : List[str] = self._get_dummy_logits()
_lowerCamelCase : int = 15
_lowerCamelCase : Union[str, Any] = -20.0
_lowerCamelCase : Optional[Any] = -4.0
_lowerCamelCase : str = processor.batch_decode(
_lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , )
_lowerCamelCase : Optional[Any] = decoded_processor_out.text
_lowerCamelCase : str = list(_lowercase )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase : List[Any] = decoder.decode_beams_batch(
_lowercase , _lowercase , beam_width=_lowercase , beam_prune_logp=_lowercase , token_min_logp=_lowercase , )
_lowerCamelCase : Tuple = [d[0][0] for d in decoded_decoder_out]
_lowerCamelCase : Any = [d[0][2] for d in decoded_decoder_out]
_lowerCamelCase : Optional[Any] = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , _lowercase )
self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , _lowercase , atol=1E-3 ) )
self.assertTrue(np.array_equal(_lowercase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , _lowercase , atol=1E-3 ) )
def a__ ( self ) -> str:
_lowerCamelCase : Any = self.get_feature_extractor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : List[str] = self.get_decoder()
_lowerCamelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
_lowerCamelCase : Optional[int] = self._get_dummy_logits()
_lowerCamelCase : Optional[int] = 2.0
_lowerCamelCase : Optional[Any] = 5.0
_lowerCamelCase : int = -20.0
_lowerCamelCase : Any = True
_lowerCamelCase : List[Any] = processor.batch_decode(
_lowercase , alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , )
_lowerCamelCase : Optional[Any] = decoded_processor_out.text
_lowerCamelCase : Optional[int] = list(_lowercase )
decoder.reset_params(
alpha=_lowercase , beta=_lowercase , unk_score_offset=_lowercase , lm_score_boundary=_lowercase , )
with get_context('''fork''' ).Pool() as pool:
_lowerCamelCase : Tuple = decoder.decode_beams_batch(
_lowercase , _lowercase , )
_lowerCamelCase : Optional[int] = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_lowercase , _lowercase )
self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , _lowercase )
_lowerCamelCase : int = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , _lowercase )
def a__ ( self ) -> Dict:
_lowerCamelCase : List[Any] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase : Union[str, Any] = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase : Optional[Any] = os.listdir(_lowercase )
_lowerCamelCase : str = ['''alphabet.json''', '''language_model''']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_lowercase , _lowercase )
def a__ ( self ) -> int:
_lowerCamelCase : Union[str, Any] = snapshot_download('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowercase )
_lowerCamelCase : str = processor.decoder.model_container[processor.decoder._model_key]
_lowerCamelCase : Dict = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute()
_lowerCamelCase : Optional[Any] = os.listdir(_lowercase )
_lowerCamelCase : Optional[Any] = os.listdir(_lowercase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_lowercase , _lowercase )
def a__ ( self ) -> Optional[int]:
_lowerCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase : Optional[Any] = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase : int = floats_list((3, 1000) )
_lowerCamelCase : Any = processor_wavaveca(_lowercase , return_tensors='''np''' )
_lowerCamelCase : List[str] = processor_auto(_lowercase , return_tensors='''np''' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCamelCase : List[Any] = self._get_dummy_logits()
_lowerCamelCase : Any = processor_wavaveca.batch_decode(_lowercase )
_lowerCamelCase : str = processor_auto.batch_decode(_lowercase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def a__ ( self ) -> Dict:
_lowerCamelCase : List[str] = self.get_feature_extractor()
_lowerCamelCase : Dict = self.get_tokenizer()
_lowerCamelCase : Tuple = self.get_decoder()
_lowerCamelCase : str = WavaVecaProcessorWithLM(tokenizer=_lowercase , feature_extractor=_lowercase , decoder=_lowercase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , )
@staticmethod
def a__ ( _lowercase , _lowercase ) -> Tuple:
_lowerCamelCase : Dict = [d[key] for d in offsets]
return retrieved_list
def a__ ( self ) -> Tuple:
_lowerCamelCase : Dict = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase : int = self._get_dummy_logits()[0]
_lowerCamelCase : str = processor.decode(_lowercase , output_word_offsets=_lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowercase , _lowercase ) )
self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] )
def a__ ( self ) -> Tuple:
_lowerCamelCase : List[str] = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' )
_lowerCamelCase : str = self._get_dummy_logits()
_lowerCamelCase : Any = processor.batch_decode(_lowercase , output_word_offsets=_lowercase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('''text''' in outputs )
self.assertTrue('''word_offsets''' in outputs )
self.assertTrue(isinstance(_lowercase , _lowercase ) )
self.assertListEqual(
[''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def a__ ( self ) -> List[str]:
import torch
_lowerCamelCase : Union[str, Any] = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=_lowercase )
_lowerCamelCase : Union[str, Any] = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) )
_lowerCamelCase : List[str] = iter(_lowercase )
_lowerCamelCase : List[str] = next(_lowercase )
_lowerCamelCase : int = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
_lowerCamelCase : Tuple = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCamelCase : List[str] = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values
with torch.no_grad():
_lowerCamelCase : List[str] = model(_lowercase ).logits.cpu().numpy()
_lowerCamelCase : Optional[Any] = processor.decode(logits[0] , output_word_offsets=_lowercase )
_lowerCamelCase : Tuple = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCamelCase : Union[str, Any] = [
{
'''start_time''': d['''start_offset'''] * time_offset,
'''end_time''': d['''end_offset'''] * time_offset,
'''word''': d['''word'''],
}
for d in output['''word_offsets''']
]
_lowerCamelCase : Dict = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'''
# output words
self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , _lowercase )
self.assertEqual(''' '''.join(self.get_from_offsets(_lowercase , '''word''' ) ) , output.text )
# output times
_lowerCamelCase : str = torch.tensor(self.get_from_offsets(_lowercase , '''start_time''' ) )
_lowerCamelCase : Optional[int] = torch.tensor(self.get_from_offsets(_lowercase , '''end_time''' ) )
# fmt: off
_lowerCamelCase : Tuple = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCamelCase : Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
self.assertTrue(torch.allclose(_lowercase , _lowercase , atol=0.01 ) )
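# --- Editor's note: illustrative sketch, not part of the original test file. ---
# The slow test above converts CTC frame offsets to seconds: the model emits one
# logit frame per `inputs_to_logits_ratio` input samples, so
# seconds = frame_offset * inputs_to_logits_ratio / sampling_rate. With assumed
# wav2vec2-base values:
#
#   inputs_to_logits_ratio = 320   # product of the conv feature-extractor strides
#   sampling_rate = 16_000
#   time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per frame
#   assert abs(71 * time_offset - 1.42) < 1e-9            # frame 71 -> 1.42 s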
| 558
|
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any =9.8_0665
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = g ) ->float:
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
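# --- Editor's note: worked example, not part of the original file. ---
# For 0.5 m^3 fully submerged in fresh water (rho ~= 997 kg/m^3):
#
#   force = archimedes_principle(fluid_density=997, volume=0.5)
#   # 997 * 9.80665 * 0.5 ~= 4888.6 N of buoyant force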
| 558
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
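# --- Editor's note: hedged sketch of the lazy-import pattern above, not part of
# the original file. The real implementation lives in transformers.utils; this
# toy version only shows the core idea: resolve attributes to submodules on
# first access instead of importing every backend eagerly.
#
#   import importlib, types
#
#   class LazyModuleSketch(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._class_to_module = {
#               cls: mod for mod, classes in import_structure.items() for cls in classes
#           }
#       def __getattr__(self, attr):
#           module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
#           return getattr(module, attr)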
| 424
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp(self ):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )

    def get_tokenizer(self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer ):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text

    def test_full_tokenizer(self ):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_special_tokens_small_tok(self ):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        assert tok("sam" ).input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text] , padding=False , truncation=True )["input_ids"]
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok(self ):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text )["input_ids"]
        encoded_dot = tok(src_text_dot )["input_ids"]
        assert encoded[-1] == encoded_dot[0]
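# --- Editor's note: illustrative trace, not part of the original test file. ---
# Why "apte" tokenizes to ["ap@@", "te"] under the merges defined in setUp: BPE
# starts from characters and repeatedly applies the first applicable merge rule,
# with "@@" marking a non-final subword piece:
#
#   "a p t e</w>"  --("a p")-->  "ap t e</w>"  --("t e</w>")-->  "ap te</w>"
#   # "ap t</w>" never applies (the pair is now "ap te</w>"), so the word is
#   # rendered as "ap@@ te"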
| 424
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711
|
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class DetrConfig(PretrainedConfig ):
    '''simple docstring'''

    model_type = "detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=100 , encoder_layers=6 , encoder_ffn_dim=2_048 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_048 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
                backbone_config = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("""model_type""" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation, backbone, use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self ) -> int:
        return self.d_model

    @classmethod
    def from_backbone_config(cls , backbone_config: PretrainedConfig , **kwargs ):
        return cls(backbone_config=backbone_config , **kwargs )

    def to_dict(self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class DetrOnnxConfig(OnnxConfig ):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
                ("""pixel_mask""", {0: """batch"""}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self ) -> int:
        return 12
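# --- Editor's note: usage sketch, not part of the original file. ---
# Round-tripping the config through to_dict()/from_dict(), assuming transformers
# is installed:
#
#   config = DetrConfig(num_queries=50, d_model=128)
#   d = config.to_dict()
#   assert d["model_type"] == "detr" and d["num_queries"] == 50
#   restored = DetrConfig.from_dict(d)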
| 171
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = ["""LongformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 19
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : str =["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 136
| 0
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class _A ( unittest.TestCase ):
    def analyze_directory(self , directory: Path , identifier: Union[str, None] = None , n_identifier: Union[str, List[str], None] = None , ignore_files: Union[List[str], None] = None , only_modules: bool = True , ):
        """simple docstring"""
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory , file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""" )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file )
            if only_modules:
                module_identifier = file.split(""".""" )[0]
                try:
                    module_identifier = getattr(transformers , module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) , 0 )
                except AttributeError:
                    logger.info(f'{module_identifier} is not a module.' )
            else:
                result = doctest.testfile(str(Path("""..""" ) / directory / file ) , optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed , 0 )
    def test_modeling_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(directory , identifier=identifier , ignore_files=ignore_files )

    def test_tokenization_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """tokenization"""
        self.analyze_directory(directory , identifier=identifier )

    def test_configuration_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        identifier = """configuration"""
        self.analyze_directory(directory , identifier=identifier )

    def test_remaining_doctests(self ):
        """simple docstring"""
        directory = Path("""src/transformers""" )
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(directory , n_identifier=n_identifiers )

    def test_doc_sources(self ):
        """simple docstring"""
        directory = Path("""docs/source""" )
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(directory , ignore_files=ignore_files , only_modules=False )
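# --- Editor's note: minimal sketch of the mechanism used above, not part of the
# original file. doctest.DocTestSuite turns a module's docstring examples into
# unittest cases (the stdlib `statistics` module is used here only because it is
# known to contain doctest examples):
#
#   import doctest, statistics, unittest
#   suite = doctest.DocTestSuite(statistics)
#   result = unittest.TextTestRunner().run(suite)
#   assert len(result.failures) == 0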
| 716
|
"""simple docstring"""
from __future__ import annotations
def UpperCAmelCase__ ( lowerCAmelCase__ :list[float] , lowerCAmelCase__ :list[float] ) -> float:
'''simple docstring'''
lowercase = sorted(numsa + numsa )
lowercase , lowercase = divmod(len(lowerCAmelCase__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[Any] =[float(x) for x in input("""Enter the elements of first array: """).split()]
__lowerCAmelCase : List[Any] =[float(x) for x in input("""Enter the elements of second array: """).split()]
print(F"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 197
| 0
|
'''simple docstring'''
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification

logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename ):
    '''Parse a label file into {line_number: first_token}, later used as the model's id2label mapping.'''
    result = {}
    with open(filename , "r" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
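# Editor's note (worked example, assumed input format): if the dict file contains
#   happy 326
#   sad 100
# then read_txt_into_dict returns {0: "happy", 1: "sad"}, which becomes the
# sequence-classification model's id2label mapping further below.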
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    '''Copy one fairseq tensor into the matching attribute of the HF model, checking shapes first.'''
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("." ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("." ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def rename_dict(key , value , full_name , weight_type , hf_dict ):
    '''Record one fairseq tensor under its HF state-dict key instead of setting it on a model.'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split("." )[-1]]
            weight_type = "param"
    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wav2vec2_layer(name , value , hf_model=None , hf_dict=None ):
    '''Route one fairseq weight to the HF model (or state dict) via the MAPPING table.'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split("." )[-2]
                mapped_key = mapped_key.replace("*" , layer_index )
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
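# Editor's note (worked example, not in the original): for a fairseq weight named
# "encoder.layers.3.self_attn.k_proj.weight", the loop above matches the MAPPING
# key "self_attn.k_proj" (mapped to "encoder.layers.*.attention.k_proj"), extracts
# layer_index = "3" via name.split(key)[0].split(".")[-2], and targets the HF
# attribute "wav2vec2.encoder.layers.3.attention.k_proj" with weight_type "weight".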
def recursively_load_weights(fairseq_model , hf_model , is_headless ):
    '''Walk the fairseq state dict and load every recognized tensor into the HF model.'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    '''simple docstring'''
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.")
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
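# Illustrative sketch (hypothetical key, not from the original script): how
# load_conv_layer parses a fairseq feature-extractor name into indices.
_demo_items = "conv_layers.0.2.weight".split("conv_layers.")[-1].split(".")
assert (int(_demo_items[0]), int(_demo_items[1])) == (0, 2)  # layer 0, layer-norm entry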
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    '''simple docstring'''
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()
    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.idalabel = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)
    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wavavec, not is_finetuned)
    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
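# Example invocation (file names and paths are placeholders, not from the
# original script):
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small.pt \
#       --pytorch_dump_folder_path ./wav2vec2-base \
#       --not_finetuned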
| 111
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
"""simple docstring"""
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)
    def test_rust_and_python_bpe_tokenizers(self):
        '''simple docstring'''
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = "I was born in 92000, and this is falsé."
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
        '''simple docstring'''
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=sequences, )
| 111
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('''gelu''')
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))
    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation('''gelu''')
        gelu10 = get_activation('''gelu_10''')
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))
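    # Minimal sketch of what the test above verifies (assuming "gelu_10" is a
    # GELU whose output is clipped at 10.0): large positive inputs saturate,
    # e.g. get_activation("gelu_10")(torch.tensor([100.0])) -> tensor([10.]),
    # while values below the clip threshold match plain GELU.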
    def test_get_activation(self):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
        with self.assertRaises(KeyError):
            get_activation('''bogus''')
        with self.assertRaises(KeyError):
            get_activation(None)
    def test_activations_are_distinct_objects(self):
        act1 = get_activation('''gelu''')
        act1.a = 1
        act2 = get_activation('''gelu''')
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 702
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()
def test_change_contrast():
    with Image.open('''digital_image_processing/image_data/lena_small.jpg''') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '''<PIL.Image.Image image mode=RGB size=100x100 at''')
def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()
def test_canny():
    canny_img = imread('''digital_image_processing/image_data/lena_small.jpg''', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()
def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()
def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uinta)
    assert res.any()
def test_median_filter():
    assert med.median_filter(gray, 3).any()
def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()
def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()
def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()
def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()
def test_local_binary_pattern():
    file_path = '''digital_image_processing/image_data/lena.jpg'''
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
| 49
| 0
|
'''simple docstring'''
def binary_insertion_sort(collection):
    """simple docstring"""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
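# Illustrative checks (in the doctest spirit used elsewhere in this repo):
assert binary_insertion_sort([0, 4, 1234, 4, 1]) == [0, 1, 4, 4, 1234]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([-1, -2, -3]) == [-3, -2, -1]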
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 310
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest(unittest.TestCase):
@slow
def UpperCAmelCase__ (self : Any ) -> Optional[Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModel.from_pretrained(A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModel.from_pretrained(A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : str ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelForPreTraining.from_pretrained(A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelForPreTraining.from_pretrained(A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : Any ) -> int:
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelForCausalLM.from_pretrained(A__ , from_pt=A__ )
lowercase , lowercase = TFAutoModelForCausalLM.from_pretrained(
A__ , output_loading_info=A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelForCausalLM.from_pretrained(A__ , from_tf=A__ )
lowercase , lowercase = AutoModelForCausalLM.from_pretrained(
A__ , output_loading_info=A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : Tuple ) -> List[str]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelWithLMHead.from_pretrained(A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelWithLMHead.from_pretrained(A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : List[Any] ) -> Optional[Any]:
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelForMaskedLM.from_pretrained(A__ , from_pt=A__ )
lowercase , lowercase = TFAutoModelForMaskedLM.from_pretrained(
A__ , output_loading_info=A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelForMaskedLM.from_pretrained(A__ , from_tf=A__ )
lowercase , lowercase = AutoModelForMaskedLM.from_pretrained(
A__ , output_loading_info=A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : List[Any] ) -> Optional[Any]:
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(A__ , from_pt=A__ )
lowercase , lowercase = TFAutoModelForSeqaSeqLM.from_pretrained(
A__ , output_loading_info=A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelForSeqaSeqLM.from_pretrained(A__ , from_tf=A__ )
lowercase , lowercase = AutoModelForSeqaSeqLM.from_pretrained(
A__ , output_loading_info=A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : Tuple ) -> Optional[int]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelForSequenceClassification.from_pretrained(A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelForSequenceClassification.from_pretrained(A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
@slow
def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]:
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
lowercase = AutoConfig.from_pretrained(A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = TFAutoModelForQuestionAnswering.from_pretrained(A__ , from_pt=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
lowercase = AutoModelForQuestionAnswering.from_pretrained(A__ , from_tf=A__ )
self.assertIsNotNone(A__ )
self.assertIsInstance(A__ , A__ )
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 310
| 1
|
'''simple docstring'''
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}
def get_week_day(year: int, month: int, day: int) -> str:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
SCREAMING_SNAKE_CASE_ = year // 100
SCREAMING_SNAKE_CASE_ = (5 * (century % 4) + 2) % 7
SCREAMING_SNAKE_CASE_ = year % 100
SCREAMING_SNAKE_CASE_ = centurian % 12
SCREAMING_SNAKE_CASE_ = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
SCREAMING_SNAKE_CASE_ = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
SCREAMING_SNAKE_CASE_ = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
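# Worked example (illustrative): 24 Oct 2020. century = 20 -> anchor = (5*(20%4)+2)%7 = 2;
# centurian = 20 -> dooms_day = (1 + 8 + 2 + 2) % 7 = 6; 2020 is a leap year, so
# day_anchor = DOOMSDAY_LEAP[9] = 3; (6 + 24 - 3) % 7 = 6 -> "Saturday".
assert get_week_day(2020, 10, 24) == "Saturday"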
if __name__ == "__main__":
import doctest
doctest.testmod()
| 238
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
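# Illustrative note (sketch, not from this file): _LazyModule defers the heavy
# submodule imports until first attribute access, e.g.
#   from transformers.models.pegasus_x import PegasusXConfig
# only triggers the actual import of configuration_pegasus_x at that point.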
| 238
| 1
|
"""simple docstring"""
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
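# Illustrative checks: the best window of length 4 below is [3, 1, 0, 20] with sum 24.
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24
assert max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 1) == 20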
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1_000, 1_000) for i in range(100)]
    k = randint(0, 110)
    print(F"The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}")
| 118
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
'''simple docstring'''
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
__lowercase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowerCamelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , "neck_hidden_sizes" ) )
self.parent.assertTrue(hasattr(_lowerCamelCase , "num_attention_heads" ) )
class MobileViTModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
'''simple docstring'''
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values, labels=pixel_labels)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            """feature-extraction""": MobileViTModel,
            """image-classification""": MobileViTForImageClassification,
            """image-segmentation""": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
pass
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
pass
    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        '''simple docstring'''
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''
    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        '''simple docstring'''
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ], device=torch_device, )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        '''simple docstring'''
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 118
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 453
|
def miller_rabin(n, allow_probable=False):
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime.")
    # array bounds provided by analysis
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
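# Worked example of the d * 2**s decomposition above (illustrative): for
# n = 561, n - 1 = 560 = 35 * 2**4, so d = 35 and s = 4.
assert 35 * 2**4 == 560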
def test_miller_rabin():
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
| 453
| 1
|
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n
    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])
    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
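# Illustrative usage with the demo graphs above: in graph 0, removing any of
# these edges disconnects the graph.
assert compute_bridges(get_demo_graph(0)) == [(3, 4), (2, 3), (2, 5)]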
if __name__ == "__main__":
import doctest
doctest.testmod()
| 285
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """simple docstring"""
    model_type = '''wav2vec2'''
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
                ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
    @property
    def inputs_to_logits_ratio(self):
        """simple docstring"""
        return functools.reduce(operator.mul, self.conv_stride, 1)
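    # Illustrative check (assuming the default conv_stride above): each output
    # frame of the feature extractor covers 5 * 2**6 = 320 raw audio samples,
    # i.e. Wav2Vec2Config().inputs_to_logits_ratio == 320.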
| 285
| 1
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
UpperCamelCase__ = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, """git_log.json"""), """w""") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return
    assert torch.cuda.is_available()
    logger.info("""Initializing GPUs""")
    if params.n_gpu > 1:
        assert params.local_rank != -1
        params.world_size = int(os.environ["""WORLD_SIZE"""])
        params.n_gpu_per_node = int(os.environ["""N_GPU_NODE"""])
        params.global_rank = int(os.environ["""RANK"""])
        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True
        assert params.n_nodes == int(os.environ["""N_NODES"""])
        assert params.node_id == int(os.environ["""NODE_RANK"""])
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1
    # summary
    PREFIX = F"--- Global rank: {params.global_rank} - "
logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
logger.info(PREFIX + """Node ID : %i""" % params.node_id )
logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
logger.info(PREFIX + """World size : %i""" % params.world_size )
logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
# set GPU device
torch.cuda.set_device(params.local_rank )
# initialize multi-GPU
if params.multi_gpu:
logger.info("""Initializing PyTorch distributed""" )
torch.distributed.init_process_group(
init_method="""env://""" , backend="""nccl""" , )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
| 702
|
def is_sum_subset(arr, required_sum) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
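# Illustrative checks: 14 = 2 + 4 + 8 is reachable, 5 is not (all values even).
assert is_sum_subset([2, 4, 6, 8], 14) is True
assert is_sum_subset([2, 4, 6, 8], 5) is False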
if __name__ == "__main__":
import doctest
doctest.testmod()
| 634
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = "vivit"
    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 90
|
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums):
    """simple docstring"""
    if len(nums) < 2:
        raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""")
    if any(i <= 0 for i in nums):
        raise ValueError("""All values must be greater than 0""")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
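# Illustrative checks: sides [6, 10, 5] close into a polygon (10 < 6 + 5),
# while [13, 2, 3, 7] cannot (13 >= 2 + 3 + 7).
assert check_polygon([6, 10, 5]) is True
assert check_polygon([13, 2, 3, 7]) is False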
if __name__ == "__main__":
import doctest
doctest.testmod()
| 129
| 0
|
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
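
# Hedged sanity check (added; not in the original source):
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]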
print(sieve(1_0**6))
| 208
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""CarlCochet/trajectory-transformer-halfcheetah-medium-v2""": (
"""https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"""
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    """Configuration class for TrajectoryTransformer models."""

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, vocab_size=100, action_weight=5, reward_weight=1, value_weight=1, block_size=249,
        action_dim=6, observation_dim=17, transition_dim=25, n_layer=4, n_head=4, n_embd=128,
        embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1, learning_rate=0.0006,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12,
        kaiming_initializer_range=1, use_cache=True, pad_token_id=1, bos_token_id=50256,
        eos_token_id=50256, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
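# Hedged usage sketch (added; not part of the original file). The
# attribute_map above lets the generic name resolve to the GPT-style one:
#
#     config = TrajectoryTransformerConfig(n_embd=256)
#     assert config.hidden_size == 256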
| 208
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 238
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowercase( lowercase__ ):
'''simple docstring'''
def __init__( self , __a , __a=13 , __a=7 , __a=True , __a=True , __a=True , __a=True , __a=99 , __a=32 , __a=5 , __a=4 , __a=37 , __a="gelu" , __a=0.1 , __a=0.1 , __a=512 , __a=16 , __a=2 , __a=0.02 , __a=False , __a=True , __a="None" , __a=3 , __a=4 , __a=None , ):
__lowerCamelCase : List[str] = parent
__lowerCamelCase : Optional[int] = batch_size
__lowerCamelCase : Union[str, Any] = seq_length
__lowerCamelCase : Optional[int] = is_training
__lowerCamelCase : List[str] = use_input_mask
__lowerCamelCase : Dict = use_token_type_ids
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : int = num_attention_heads
__lowerCamelCase : int = intermediate_size
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : List[str] = max_position_embeddings
__lowerCamelCase : Tuple = type_vocab_size
__lowerCamelCase : List[Any] = type_sequence_label_size
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : Any = num_labels
__lowerCamelCase : Union[str, Any] = num_choices
__lowerCamelCase : int = relative_attention
__lowerCamelCase : Tuple = position_biased_input
__lowerCamelCase : int = pos_att_type
__lowerCamelCase : Dict = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCamelCase : Optional[int] = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : int = model(__a , attention_mask=__a , token_type_ids=__a )[0]
__lowerCamelCase : Optional[int] = model(__a , token_type_ids=__a )[0]
__lowerCamelCase : Optional[Any] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCamelCase : List[str] = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : List[str] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCamelCase : str = self.num_labels
__lowerCamelCase : Tuple = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowerCamelCase : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCamelCase : Optional[Any] = self.num_labels
__lowerCamelCase : List[str] = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : List[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCamelCase : Optional[int] = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : List[str] = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self , __a , __a , __a , __a , __a , __a , __a ):
__lowerCamelCase : Optional[Any] = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
__lowerCamelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCamelCase : str = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def snake_case_ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowercase( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
__a : str = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__a : Optional[int] = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__a : Tuple = True
__a : List[Any] = False
__a : Any = False
__a : Tuple = False
__a : Tuple = False
def snake_case_ ( self ):
__lowerCamelCase : Optional[int] = DebertaVaModelTester(self )
__lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a , hidden_size=37 )
def snake_case_ ( self ):
self.config_tester.run_common_tests()
def snake_case_ ( self ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def snake_case_ ( self ):
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def snake_case_ ( self ):
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def snake_case_ ( self ):
__lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def snake_case_ ( self ):
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def snake_case_ ( self ):
__lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def snake_case_ ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 594
| 0
|
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # Only fields changed from their defaults should be forwarded as kwargs.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
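# Hedged usage note (added; not in the original): the @require_multi_gpu test
# above re-launches this same file under torchrun, e.g.
#   torchrun --nproc_per_node=2 <this_file>.py
# so the checks in the __main__ block run once per process.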
| 714
|
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[ndarray]`):
List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
references (`List[ndarray]`):
List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
num_labels (`int`):
Number of classes (categories).
ignore_index (`int`):
Index that will be ignored during evaluation.
nan_to_num (`int`, *optional*):
If specified, NaN values will be replaced by the number defined by the user.
label_map (`dict`, *optional*):
If specified, dictionary mapping old label indices to new label indices.
reduce_labels (`bool`, *optional*, defaults to `False`):
Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.
Returns:
`Dict[str, float | ndarray]` comprising various elements:
- *mean_iou* (`float`):
Mean Intersection-over-Union (IoU averaged over all categories).
- *mean_accuracy* (`float`):
Mean accuracy (averaged over all categories).
- *overall_accuracy* (`float`):
Overall accuracy on all images.
- *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
Per category accuracy.
- *per_category_iou* (`ndarray` of shape `(num_labels,)`):
Per category IoU.
Examples:
>>> import numpy as np
>>> mean_iou = datasets.load_metric(\"mean_iou\")
>>> # suppose one has 3 different segmentation maps predicted
>>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
>>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])
>>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
>>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])
>>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
>>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])
>>> predicted = [predicted_1, predicted_2, predicted_3]
>>> ground_truth = [actual_1, actual_2, actual_3]
>>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}
"""
_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""
def intersect_and_union(pred_label, label, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Per-class intersection and union areas for one prediction/label pair."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(results, gt_seg_maps, num_labels, ignore_index, label_map=None, reduce_labels=False):
    """Accumulate intersection/union areas over a set of predictions and labels."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(results, gt_seg_maps, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False):
    """Compute mean IoU and related accuracy metrics."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
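# Hedged worked example (added; not in the original source): two 2x2 maps
# that disagree on a single pixel.
if __name__ == "__main__":
    example_pred = [np.array([[0, 1], [1, 1]])]
    example_gt = [np.array([[0, 1], [0, 1]])]
    example_scores = mean_iou(example_pred, example_gt, num_labels=2, ignore_index=255)
    assert 0.0 <= example_scores["mean_iou"] <= 1.0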
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MeanIoU(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16'''))),
}) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
    def _compute(
        self, predictions, references, num_labels, ignore_index, nan_to_num=None, label_map=None, reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions, gt_seg_maps=references, num_labels=num_labels, ignore_index=ignore_index,
            nan_to_num=nan_to_num, label_map=label_map, reduce_labels=reduce_labels,
        )
        return iou_result
| 675
| 0
|
# NOTE: the function and parameter names below are reconstructed from the
# formula bodies; the original identifiers were garbled.
def normality(moles: float, volume: float, nfactor: int) -> float:
    """Normality: gram-equivalents (moles * n-factor) per litre of solution."""
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, temperature: float, volume: float) -> float:
    """Ideal-gas pressure in atm, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles: float, temperature: float, pressure: float) -> float:
    """Ideal-gas volume in litres, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> float:
    """Ideal-gas temperature in kelvin, with R = 0.0821 L*atm/(mol*K)."""
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
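    # Hedged usage checks (added; not in the original source), using the
    # reconstructed names and argument order above:
    assert normality(2, 4, 2) == 1  # (2 mol / 4 L) * n-factor 2
    assert pressure_of_gas_system(1, 300, 2) == 12  # (1 * 0.0821 * 300) / 2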
| 563
|
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Gamma function via numerical integration of x**(num-1) * e**(-x) on [0, inf)."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
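    # Hedged sanity check (added; not in the original source): for positive
    # integers, gamma(n) equals (n - 1)!.
    assert math.isclose(gamma(5), 24.0, rel_tol=1e-6)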
| 563
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class _lowercase :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_="None" , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ):
__magic_name__ = parent
__magic_name__ = batch_size
__magic_name__ = seq_length
__magic_name__ = is_training
__magic_name__ = use_input_mask
__magic_name__ = use_token_type_ids
__magic_name__ = use_labels
__magic_name__ = vocab_size
__magic_name__ = hidden_size
__magic_name__ = num_hidden_layers
__magic_name__ = num_attention_heads
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = hidden_dropout_prob
__magic_name__ = attention_probs_dropout_prob
__magic_name__ = max_position_embeddings
__magic_name__ = type_vocab_size
__magic_name__ = type_sequence_label_size
__magic_name__ = initializer_range
__magic_name__ = num_labels
__magic_name__ = num_choices
__magic_name__ = relative_attention
__magic_name__ = position_biased_input
__magic_name__ = pos_att_type
__magic_name__ = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        config = DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=True, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = TFDebertaVaModel(config=_lowercase )
__magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
__magic_name__ = [input_ids, input_mask]
__magic_name__ = model(_lowercase )
__magic_name__ = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = TFDebertaVaForMaskedLM(config=_lowercase )
__magic_name__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__magic_name__ = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = self.num_labels
__magic_name__ = TFDebertaVaForSequenceClassification(config=_lowercase )
__magic_name__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__magic_name__ = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = self.num_labels
__magic_name__ = TFDebertaVaForTokenClassification(config=_lowercase )
__magic_name__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__magic_name__ = model(_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__magic_name__ = TFDebertaVaForQuestionAnswering(config=_lowercase )
__magic_name__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
__magic_name__ = model(_lowercase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class _lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_lowerCamelCase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': TFDebertaVaModel,
'''fill-mask''': TFDebertaVaForMaskedLM,
'''question-answering''': TFDebertaVaForQuestionAnswering,
'''text-classification''': TFDebertaVaForSequenceClassification,
'''token-classification''': TFDebertaVaForTokenClassification,
'''zero-shot''': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase = False
_lowerCamelCase = False
def lowerCAmelCase__ ( self ):
__magic_name__ = TFDebertaVaModelTester(self )
__magic_name__ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_lowercase )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowercase )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_lowercase )
def lowerCAmelCase__ ( self ):
__magic_name__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowercase )
@slow
def lowerCAmelCase__ ( self ):
__magic_name__ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(_lowercase )
@require_tf
class TFDebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
| 721
|
"""simple docstring"""
class MaxFenwickTree:
    """Fenwick-style tree over `arr` supporting range-max queries.

    Note: class, method and variable names were garbled in the source and are
    reconstructed here; `query` treats the right bound as exclusive.
    """

    def __init__(self, size):
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        return (index & (index + 1)) - 1

    def update(self, index, value):
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # keep the running maximum for this node (the original line was
                # garbled; this assumes non-decreasing updates)
                self.tree[index] = max(value, self.tree[index])
            index = self.get_next(index)

    def query(self, left, right):
        right -= 1  # because `right` is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
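    # Hedged usage sketch (added; not in the original source):
    tree = MaxFenwickTree(5)
    tree.update(2, 7)
    tree.update(4, 3)
    assert tree.query(0, 5) == 7  # the right bound is exclusive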
| 190
| 0
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = logging.get_logger()
# the current default level is logging.WARNING
UpperCAmelCase : Dict = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = logging.get_verbosity()
UpperCAmelCase : Union[str, Any] = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCAmelCase : Dict = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(snake_case ) as cl:
logger.warning(snake_case )
self.assertEqual(cl.out , msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(snake_case ) as cl:
logger.warning(snake_case )
self.assertEqual(cl.out , "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(snake_case ) as cl:
logger.warning(snake_case )
self.assertEqual(cl.out , msg + "\n" )
# restore to the original level
logging.set_verbosity(snake_case )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def A_ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCAmelCase : str = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCAmelCase : List[Any] = os.getenv("TRANSFORMERS_VERBOSITY" , snake_case )
UpperCAmelCase : Any = logging.log_levels[env_level_str]
UpperCAmelCase : str = logging.get_verbosity()
self.assertEqual(
snake_case , snake_case , f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}" , )
# restore to the original level
UpperCAmelCase : Any = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def A_ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase : List[str] = logging.logging.getLogger()
with CaptureLogger(snake_case ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
# no need to restore as nothing was changed
def A_ ( self ):
'''simple docstring'''
transformers.utils.logging._reset_library_root_logger()
UpperCAmelCase : str = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCAmelCase : List[Any] = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(snake_case ) as cl:
logger.warning_advice(snake_case )
self.assertEqual(cl.out , "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(snake_case ) as cl:
logger.warning_advice(snake_case )
self.assertEqual(cl.out , msg + "\n" )
def test_set_progress_bar_enabled():
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
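# Hedged usage sketch (added; not in the original test module): the verbosity
# API exercised above, round-tripped directly:
#
#     logging.set_verbosity_info()
#     assert logging.get_verbosity() == logging.log_levels["info"]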
| 679
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """Minimum moves so every node of the tree holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
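    # Hedged usage sketch (added; not in the original source): the root holds
    # 3 coins, both leaves hold 0, so one move to each child suffices.
    root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(root) == 2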
| 679
| 1
|
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
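    # Hedged usage checks (added; not in the original source):
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("AB") == 28  # 1 * 26 + 2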
| 711
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self, vocab_size=50358, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu_new", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=4096, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0,
        bos_token_id=1, eos_token_id=2, sep_token_id=66, attention_type="block_sparse",
        use_bias=True, rescale_embeddings=False, block_size=64, num_random_blocks=3,
        classifier_dropout=None, **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, sep_token_id=sep_token_id, **kwargs,
        )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
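# Hedged usage sketch (added; not part of the original file). In an installed
# `transformers`, the ONNX axes resolve from the configured task:
#
#     config = BigBirdConfig(attention_type="original_full")
#     onnx_config = BigBirdOnnxConfig(config)
#     assert "input_ids" in onnx_config.inputs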
| 117
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class snake_case__ ( unittest.TestCase):
def A ( self : str ) -> int:
super().setUp()
UpperCAmelCase_ : Optional[Any] = [
BertTokenizer.from_pretrained(snake_case__ ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
UpperCAmelCase_ : Optional[Any] = [TFBertTokenizer.from_pretrained(snake_case__ ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(snake_case__ , use_fast_bert_tokenizer=snake_case__ )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
UpperCAmelCase_ : Dict = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
UpperCAmelCase_ : Dict = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def A ( self : Optional[int] ) -> Optional[int]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase_ : Optional[int] = tokenizer(snake_case__ , return_tensors='''tf''' , padding='''longest''' )
UpperCAmelCase_ : Union[str, Any] = tf_tokenizer(snake_case__ )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def A ( self : List[Any] ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase_ : int = tf_tokenizer(self.paired_sentences )
UpperCAmelCase_ : List[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def A ( self : Optional[int] ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase_ : str = tf.function(snake_case__ )
for test_inputs in (self.test_sentences, self.paired_sentences):
UpperCAmelCase_ : List[Any] = tf.constant(snake_case__ )
UpperCAmelCase_ : Union[str, Any] = compiled_tokenizer(snake_case__ )
UpperCAmelCase_ : Dict = tf_tokenizer(snake_case__ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def A ( self : Optional[int] ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
UpperCAmelCase_ : int = ModelToSave(tokenizer=snake_case__ )
UpperCAmelCase_ : Union[str, Any] = tf.convert_to_tensor(self.test_sentences )
UpperCAmelCase_ : List[Any] = model(snake_case__ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
UpperCAmelCase_ : Any = Path(snake_case__ ) / 'saved.model'
model.save(snake_case__ )
UpperCAmelCase_ : Any = tf.keras.models.load_model(snake_case__ )
UpperCAmelCase_ : Tuple = loaded_model(snake_case__ )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
| 541
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration


@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
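# Hedged usage sketch (added; not in the original test module): the helpers
# under test can also be called directly (network access to the Hub required):
#
#     config_names = get_dataset_config_names("squad")
#     assert "plain_text" in config_names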
| 105
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
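# A toy re-implementation of the lazy-import pattern used above, for
# illustration only (the real `_LazyModule` in transformers does more, e.g.
# module_spec handling and TYPE_CHECKING integration). Attribute access
# triggers the actual import, so importing the package stays cheap until a
# symbol is first used.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails, i.e. on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value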
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F"{test_file} instead." )
UpperCamelCase = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
UpperCamelCase = components[:-1] + [test_fn.replace(".py" , "" )]
UpperCamelCase = ".".join(_SCREAMING_SNAKE_CASE )
return test_module_path
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_module_path(_SCREAMING_SNAKE_CASE )
UpperCamelCase = importlib.import_module(_SCREAMING_SNAKE_CASE )
return test_module
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = get_test_module(_SCREAMING_SNAKE_CASE )
for attr in dir(_SCREAMING_SNAKE_CASE ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = []
UpperCamelCase = get_test_module(_SCREAMING_SNAKE_CASE )
for attr in dir(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , "all_model_classes" , [] )
if len(_SCREAMING_SNAKE_CASE ) > 0:
test_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_test_classes(_SCREAMING_SNAKE_CASE )
UpperCamelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = test_class()
if hasattr(_SCREAMING_SNAKE_CASE , "setUp" ):
test.setUp()
UpperCamelCase = None
if hasattr(_SCREAMING_SNAKE_CASE , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
UpperCamelCase = test.model_tester.__class__
return model_tester
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_test_classes(_SCREAMING_SNAKE_CASE )
UpperCamelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = []
for test_class in test_classes:
UpperCamelCase = get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE )
if tester_class is not None:
tester_classes.append(_SCREAMING_SNAKE_CASE )
# sort with class names
return sorted(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : x.__name__ )
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_test_classes(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {test_class: get_model_tester_from_test_class(_SCREAMING_SNAKE_CASE ) for test_class in test_classes}
return test_tester_mapping
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_model_classes(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {
model_class: get_test_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_test_mapping
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_model_classes(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {
model_class: get_tester_classes_for_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for model_class in model_classes
}
return model_to_tester_mapping
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return o
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return o.__name__
elif isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ):
return [to_json(_SCREAMING_SNAKE_CASE ) for x in o]
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {to_json(_SCREAMING_SNAKE_CASE ): to_json(_SCREAMING_SNAKE_CASE ) for k, v in o.items()}
else:
return o
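# A hedged usage sketch, run from the repo root: map every model class in one
# test file to its tester classes and print the result with JSON-friendly
# names. The BERT test file path is only an example.
if __name__ == "__main__":
    import json

    mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
    print(json.dumps(to_json(mapping), indent=2))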
"""simple docstring"""
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
while a != 0:
__lowercase ,__lowercase : Tuple = b % a, a
return b
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
if gcd(__UpperCamelCase , __UpperCamelCase ) != 1:
__lowercase : Union[str, Any] = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__UpperCamelCase )
__lowercase ,__lowercase ,__lowercase : str = 1, 0, a
__lowercase ,__lowercase ,__lowercase : str = 0, 1, m
while va != 0:
__lowercase : Union[str, Any] = ua // va
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : int = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
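# A quick worked example: 15 is the inverse of 7 modulo 26 because
# 7 * 15 = 105 = 4 * 26 + 1.
if __name__ == "__main__":
    assert gcd(12, 18) == 6
    assert find_mod_inverse(7, 26) == 15
    assert (7 * find_mod_inverse(7, 26)) % 26 == 1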
import warnings

from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor


logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import warnings
from typing import List

import numpy as np

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available


class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
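# A minimal usage sketch (assumes network access to download the processor
# files; "google/owlvit-base-patch32" is the commonly used public OWL-ViT
# checkpoint, and return_tensors="pt" assumes torch is installed).
if __name__ == "__main__":
    from PIL import Image

    processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
    image = Image.new("RGB", (768, 768))
    inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
    print(list(inputs.keys()))  # input_ids, attention_mask, pixel_values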
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda release: version.Version(release))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path,
    module_file,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            pass
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path,
    module_file,
    class_name=None,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
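# A hedged usage sketch: resolve a community pipeline class by name. Since the
# path contains no "/", `get_cached_module_file` fetches it from the GitHub
# community folder via COMMUNITY_PIPELINES_URL; "clip_guided_stable_diffusion"
# is assumed to be one of the community pipelines available there.
if __name__ == "__main__":
    pipeline_cls = get_class_from_dynamic_module(
        "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
    )
    print(pipeline_cls.__name__)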
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
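# Pigeonhole sort runs in O(n + range) time and O(range) extra space, so it is
# only a good fit when the spread of values (max - min) is comparable to the
# number of elements. A quick sanity check against Python's built-in sort:
if __name__ == "__main__":
    import random

    data = [random.randint(0, 50) for _ in range(1_000)]
    expected = sorted(data)
    pigeonhole_sort(data)
    assert data == expected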
import re
import string

import numpy as np

import datasets


_DESCRIPTION = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions: List of predicted texts.
    references: List of reference texts.
    regexes_to_ignore: List, defaults to None. Regex expressions of characters to
        ignore when calculating the exact matches. Note: these regexes are removed
        from the input data before the changes based on the options below (e.g. ignore_case,
        ignore_punctuation, ignore_numbers) are applied.
    ignore_case: Boolean, defaults to False. If true, turns everything
        to lowercase so that capitalization differences are ignored.
    ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
        comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
    exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    25.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    50.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
    >>> print(round(results["exact_match"], 1))
    75.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["the cat", "theater", "YELLING", "agent007"]
    >>> preds = ["cat?", "theater", "yelling", "agent"]
    >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
    >>> print(round(results["exact_match"], 1))
    100.0
    >>> exact_match = datasets.load_metric("exact_match")
    >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It's like comparing oranges and apples."]
    >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It's like comparing apples and oranges."]
    >>> results = exact_match.compute(references=refs, predictions=preds)
    >>> print(round(results["exact_match"], 1))
    33.3
"""

_CITATION = """
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """Incremental Sieve of Eratosthenes: lazily yields the primes in order."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: push its recorded prime factor to the next multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime: its square is the first composite it needs to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
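# Sanity check for the incremental sieve: the first ten primes it yields.
if __name__ == "__main__":
    from itertools import islice

    assert list(islice(sieve(), 10)) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]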
import argparse

import requests
import torch

# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image

from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD


def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


def create_rename_keys(config):
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id=None):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size


@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )

    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
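# Hedged usage note: the script's filename below is assumed (in the
# transformers repo it is convert_blip_2_original_to_pytorch.py). A typical
# invocation, given the salesforce-lavis fork noted at the top and enough
# memory to hold both the original and the converted model:
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b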
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polymonial Regression results
def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
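# A small extension sketch (not part of the original script): compare the fit
# of a plain linear model against the degree-4 polynomial model on the same
# data using sklearn's r2_score.
if __name__ == "__main__":
    from sklearn.metrics import r2_score

    lin_reg = LinearRegression()
    lin_reg.fit(X, y)
    print("linear R^2:    ", r2_score(y, lin_reg.predict(X)))
    print("polynomial R^2:", r2_score(y, pol_reg.predict(poly_reg.fit_transform(X))))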
def check_bouncy(n: int) -> bool:
    """A number is bouncy if its digits are neither entirely non-decreasing nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
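# Sanity checks from the problem statement (Project Euler 112): the proportion
# of bouncy numbers first reaches 50% at 538 and 90% at 21780.
if __name__ == "__main__":
    assert solution(50) == 538
    assert solution(90) == 21780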
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
import collections
import os
import re
from pathlib import Path
UpperCAmelCase_ = """src/transformers"""
# Matches is_xxx_available()
UpperCAmelCase_ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
UpperCAmelCase_ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
UpperCAmelCase_ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
UpperCAmelCase_ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
UpperCAmelCase_ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
UpperCAmelCase_ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
UpperCAmelCase_ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
UpperCAmelCase_ = re.compile(r"""^\s*try:""")
# Catches a line with else:
UpperCAmelCase_ = re.compile(r"""^\s*else:""")
def SCREAMING_SNAKE_CASE_ ( _snake_case :Optional[int] ) -> Any:
if _re_test_backend.search(_snake_case ) is None:
return None
_A = [b[0] for b in _re_backend.findall(_snake_case )]
backends.sort()
return "_and_".join(_snake_case )
def SCREAMING_SNAKE_CASE_ ( _snake_case :Any ) -> Any:
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
_A = f.readlines()
_A = 0
while line_index < len(_snake_case ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(_snake_case ):
return None
# First grab the objects without a specific backend in _import_structure
_A = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
_A = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(_snake_case ):
_A = _re_one_line_import_struct.search(_snake_case ).groups()[0]
_A = re.findall(r'''\[([^\]]+)\]''' , _snake_case )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
_A = _re_import_struct_key_value.search(_snake_case )
if single_line_import_search is not None:
_A = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
_A = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
_A = lines[line_index]
if _re_import_struct_add_one.search(_snake_case ) is not None:
objects.append(_re_import_struct_add_one.search(_snake_case ).groups()[0] )
elif _re_import_struct_add_many.search(_snake_case ) is not None:
_A = _re_import_struct_add_many.search(_snake_case ).groups()[0].split(''', ''' )
_A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_between_brackets.search(_snake_case ) is not None:
_A = _re_between_brackets.search(_snake_case ).groups()[0].split(''', ''' )
_A = [obj[1:-1] for obj in imports if len(_snake_case ) > 0]
objects.extend(_snake_case )
elif _re_quote_object.search(_snake_case ) is not None:
objects.append(_re_quote_object.search(_snake_case ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 12 + '''"''' ):
objects.append(line[13:-3] )
line_index += 1
_A = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
_A = []
while (
line_index < len(_snake_case )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
_A = lines[line_index]
_A = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
_A = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(_snake_case ):
# If the line is an if is_backend_available, we grab all objects associated.
_A = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
_A = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
_A = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
_A = lines[line_index]
_A = _re_import.search(_snake_case )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 12 ):
objects.append(line[12:-2] )
line_index += 1
_A = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def SCREAMING_SNAKE_CASE_ ( _snake_case :List[str] , _snake_case :Dict ) -> Any:
def find_duplicates(_snake_case :Any ):
return [k for k, v in collections.Counter(_snake_case ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
_A = []
for key in import_dict_objects.keys():
_A = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F'''Duplicate _import_structure definitions for: {duplicate_imports}''' )
_A = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
_A = '''base imports''' if key == '''none''' else F'''{key} backend'''
errors.append(F'''Differences for {name}:''' )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F''' {a} in TYPE_HINT but not in _import_structure.''' )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F''' {a} in _import_structure but not in TYPE_HINT.''' )
return errors
def SCREAMING_SNAKE_CASE_ ( ) -> int:
_A = []
for root, _, files in os.walk(_snake_case ):
if "__init__.py" in files:
_A = os.path.join(_snake_case , '''__init__.py''' )
_A = parse_init(_snake_case )
if objects is not None:
_A = analyze_results(*_snake_case )
if len(_snake_case ) > 0:
_A = F'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
failures.append('''\n'''.join(_snake_case ) )
if len(_snake_case ) > 0:
raise ValueError('''\n\n'''.join(_snake_case ) )
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
_A = []
for path, directories, files in os.walk(_snake_case ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(_snake_case )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(_snake_case ) / folder).glob('''*.py''' ) ) ) == 0:
continue
_A = str((Path(_snake_case ) / folder).relative_to(_snake_case ) )
_A = short_path.replace(os.path.sep , '''.''' )
submodules.append(_snake_case )
for fname in files:
if fname == "__init__.py":
continue
_A = str((Path(_snake_case ) / fname).relative_to(_snake_case ) )
_A = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(_snake_case )
return submodules
UpperCAmelCase_ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
"""models.esm.openfold_utils""",
]
def SCREAMING_SNAKE_CASE_ ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
from transformers.utils import direct_transformers_import
_A = direct_transformers_import(_snake_case )
_A = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
with open(os.path.join(_snake_case , '''__init__.py''' ) , '''r''' ) as f:
_A = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , _snake_case ) ) )
_A = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(_snake_case ) > 0:
_A = '''\n'''.join(F'''- {module}''' for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
F'''{list_of_modules}\n'''
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
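# A minimal sketch (module name "foo" is hypothetical) of the init layout the
# two checks above enforce: every object registered in `_import_structure`,
# per backend, must reappear verbatim under TYPE_CHECKING behind a mirrored
# availability guard, and every submodule must appear as a key.
#
# _import_structure = {"configuration_foo": ["FooConfig"]}
# try:
#     if not is_torch_available():
#         raise OptionalDependencyNotAvailable()
# except OptionalDependencyNotAvailable:
#     pass
# else:
#     _import_structure["modeling_foo"] = ["FooModel"]
# if TYPE_CHECKING:
#     from .configuration_foo import FooConfig
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         from .modeling_foo import FooModel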
| 2
| 1
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase ( SCREAMING_SNAKE_CASE ):
@staticmethod
@abstractmethod
def UpperCamelCase ( snake_case__ : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
raise NotImplementedError()
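# A minimal concrete subcommand sketch. The command itself is hypothetical;
# in the unmasked library the two abstract hooks above are
# register_subcommand(parser) and run():
#
# class PingCommand(BaseCLICommand):
#     @staticmethod
#     def register_subcommand(parser: ArgumentParser):
#         sub = parser.add_parser("ping", help="sanity-check the CLI")
#         sub.set_defaults(func=lambda args: PingCommand())
#     def run(self):
#         print("pong")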
| 673
|
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
a_ : List[str] = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
a_ : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
a_ : str = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
a_ : int = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
def UpperCamelCase ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def UpperCamelCase ( self : Dict , snake_case__ : int ):
"""simple docstring"""
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=0.9 , snake_case__ : Optional[Any]=3 , snake_case__ : Any=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5' ):
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(
word_tokenize(snake_case__ ) , word_tokenize(snake_case__ ) , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
else:
SCREAMING_SNAKE_CASE = [
meteor_score.single_meteor_score(snake_case__ , snake_case__ , alpha=snake_case__ , beta=snake_case__ , gamma=snake_case__ )
for ref, pred in zip(snake_case__ , snake_case__ )
]
return {"meteor": np.mean(snake_case__ )}
| 673
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a :Tuple = {
'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :List[str] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :int = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a :Optional[Any] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__a :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
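# Note on the pattern above: at runtime the module is replaced by a thin
# _LazyModule shim, so importing a name such as VisionEncoderDecoderModel
# defers the heavy torch import until the attribute is first accessed.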
| 86
|
from __future__ import annotations
def lowerCAmelCase_ (lowercase__ : list[int] , lowercase__ : list[int] , lowercase__ : int ) -> tuple[float, list[float]]:
'''simple docstring'''
lowerCAmelCase__ = list(range(len(lowercase__ ) ) )
lowerCAmelCase__ = [v / w for v, w in zip(lowercase__ , lowercase__ )]
    index.sort(key=lambda i: ratio[i] , reverse=True )
lowerCAmelCase__ = 0
lowerCAmelCase__ = [0] * len(lowercase__ )
for i in index:
if weight[i] <= capacity:
lowerCAmelCase__ = 1
max_value += value[i]
capacity -= weight[i]
else:
lowerCAmelCase__ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
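# Worked example for the greedy routine above (hypothetical sample data):
# value = [60, 100, 120], weight = [10, 20, 30], capacity = 50.
# The value/weight ratios are 6, 5, 4, so items 0 and 1 are taken whole and
# 20/30 of item 2, giving max_value = 60 + 100 + 120 * 2 / 3 = 240.0 and
# fractions = [1, 1, 0.666...].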
| 668
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'microsoft/table-transformer-detection': (
'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = '''table-transformer'''
UpperCamelCase_ = ['''past_key_values''']
UpperCamelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : int , UpperCAmelCase : Dict=True , UpperCAmelCase : int=None , UpperCAmelCase : List[Any]=3 , UpperCAmelCase : Dict=100 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : int=2048 , UpperCAmelCase : List[Any]=8 , UpperCAmelCase : Any=6 , UpperCAmelCase : List[Any]=2048 , UpperCAmelCase : Union[str, Any]=8 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[Any]="relu" , UpperCAmelCase : List[str]=256 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : List[str]=0.0 , UpperCAmelCase : Tuple=0.0_2 , UpperCAmelCase : str=1.0 , UpperCAmelCase : int=False , UpperCAmelCase : List[str]="sine" , UpperCAmelCase : Tuple="resnet50" , UpperCAmelCase : int=True , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : Dict=1 , UpperCAmelCase : Tuple=5 , UpperCAmelCase : Dict=2 , UpperCAmelCase : int=1 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : int=5 , UpperCAmelCase : str=2 , UpperCAmelCase : Union[str, Any]=0.1 , **UpperCAmelCase : List[str] , ) -> str:
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase : List[Any] =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
lowercase : Any =backbone_config.get('''model_type''' )
lowercase : Union[str, Any] =CONFIG_MAPPING[backbone_model_type]
lowercase : Dict =config_class.from_dict(UpperCAmelCase )
# set timm attributes to None
lowercase , lowercase , lowercase : Union[str, Any] =None, None, None
lowercase : Optional[int] =use_timm_backbone
lowercase : Tuple =backbone_config
lowercase : int =num_channels
lowercase : int =num_queries
lowercase : List[str] =d_model
lowercase : int =encoder_ffn_dim
lowercase : Tuple =encoder_layers
lowercase : Optional[Any] =encoder_attention_heads
lowercase : Dict =decoder_ffn_dim
lowercase : Optional[Any] =decoder_layers
lowercase : List[Any] =decoder_attention_heads
lowercase : Optional[int] =dropout
lowercase : List[Any] =attention_dropout
lowercase : str =activation_dropout
lowercase : List[Any] =activation_function
lowercase : str =init_std
lowercase : int =init_xavier_std
lowercase : str =encoder_layerdrop
lowercase : Any =decoder_layerdrop
lowercase : str =encoder_layers
lowercase : Any =auxiliary_loss
lowercase : Any =position_embedding_type
lowercase : str =backbone
lowercase : List[Any] =use_pretrained_backbone
lowercase : str =dilation
# Hungarian matcher
lowercase : Union[str, Any] =class_cost
lowercase : Tuple =bbox_cost
lowercase : Optional[Any] =giou_cost
# Loss coefficients
lowercase : str =mask_loss_coefficient
lowercase : List[Any] =dice_loss_coefficient
lowercase : Any =bbox_loss_coefficient
lowercase : Any =giou_loss_coefficient
lowercase : str =eos_coefficient
super().__init__(is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
@property
def A__ ( self : Tuple ) -> int:
'''simple docstring'''
return self.encoder_attention_heads
@property
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
return self.d_model
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = version.parse('''1.11''' )
@property
def A__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def A__ ( self : str ) -> float:
'''simple docstring'''
return 1e-5
@property
def A__ ( self : List[str] ) -> int:
'''simple docstring'''
return 12
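# A short usage sketch, assuming the two classes above correspond to the
# public TableTransformerConfig / TableTransformerOnnxConfig pair (names
# inferred from the archive map, not stated in this file):
#
# config = TableTransformerConfig(num_queries=50)
# config.num_attention_heads  # aliased to encoder_attention_heads via attribute_map
# config.hidden_size          # aliased to d_model via attribute_map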
| 8
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8
| 1
|
'''simple docstring'''
from math import pi
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
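# Quick numeric check of the formula above:
# arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.7079.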
| 665
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ['''OwlViTFeatureExtractor''']
UpperCAmelCase = ['''OwlViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119
| 0
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def a_ ( __snake_case , __snake_case ) -> Tuple:
UpperCamelCase_ = args.log_outputs
UpperCamelCase_ = "_".join(args.dataset.split('/' ) + [args.config, args.split] )
# load metric
UpperCamelCase_ = load_metric('wer' )
UpperCamelCase_ = load_metric('cer' )
# compute metrics
UpperCamelCase_ = wer.compute(references=result['target'] , predictions=result['prediction'] )
UpperCamelCase_ = cer.compute(references=result['target'] , predictions=result['prediction'] )
# print & log results
UpperCamelCase_ = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , 'w' ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCamelCase_ = F'''log_{dataset_id}_predictions.txt'''
UpperCamelCase_ = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , 'w' ) as p, open(snake_case_ , 'w' ) as t:
# mapping function to write output
def write_to_file(__snake_case , __snake_case ):
p.write(F'''{i}''' + '\n' )
p.write(batch['prediction'] + '\n' )
t.write(F'''{i}''' + '\n' )
t.write(batch['target'] + '\n' )
result.map(snake_case_ , with_indices=snake_case_ )
def a_ ( __snake_case ) -> str:
UpperCamelCase_ = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCamelCase_ = re.sub(snake_case_ , '' , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCamelCase_ = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
UpperCamelCase_ = " ".join(text.split(snake_case_ ) )
return text
def a_ ( __snake_case ) -> Optional[Any]:
# load dataset
UpperCamelCase_ = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCamelCase_ = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCamelCase_ = feature_extractor.sampling_rate
# resample audio
UpperCamelCase_ = dataset.cast_column('audio' , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
UpperCamelCase_ = 0 if torch.cuda.is_available() else -1
UpperCamelCase_ = pipeline('automatic-speech-recognition' , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__snake_case ):
UpperCamelCase_ = asr(
batch['audio']['array'] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
UpperCamelCase_ = prediction["text"]
UpperCamelCase_ = normalize_text(batch['sentence'] )
return batch
# run inference on all examples
UpperCamelCase_ = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__a : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__a : str = parser.parse_args()
main(args)
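# A hypothetical invocation (script name, model id and dataset are
# placeholders, not taken from this file):
#   python eval.py --model_id <user>/wav2vec2-large-xlsr-en \
#       --dataset mozilla-foundation/common_voice_8_0 --config en \
#       --split test --log_outputs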
| 703
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__a : Optional[Any] = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Union[str, Any] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : List[str] = ["""LayoutLMv2FeatureExtractor"""]
__a : str = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a : Tuple = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
__a : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 559
| 0
|
def _lowercase ( ):
"""simple docstring"""
return 1
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : int ):
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(SCREAMING_SNAKE_CASE_ )
def _lowercase ( SCREAMING_SNAKE_CASE_ : int = 200 ):
"""simple docstring"""
return two_pound(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(solution(int(input().strip())))
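# This is Project Euler problem 31 (coin sums): counting the ways to make
# 200p from the eight UK coin denominations. The published answer, which
# solution() should reproduce, is 73682.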
| 386
|
def _lowercase ( SCREAMING_SNAKE_CASE_ : int = 10 , SCREAMING_SNAKE_CASE_ : int = 22 ):
"""simple docstring"""
UpperCamelCase = range(1 , SCREAMING_SNAKE_CASE_ )
UpperCamelCase = range(1 , SCREAMING_SNAKE_CASE_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''')
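# This is Project Euler problem 63 (powerful digit counts): n-digit positive
# integers that are also an nth power. The published answer, which
# solution(10, 22) should reproduce, is 49.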
| 386
| 1
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_UpperCamelCase : Dict = logging.get_logger(__name__)
# TODO: upload to AWS
_UpperCamelCase : Tuple = {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
),
}
class _snake_case ( a_ ):
SCREAMING_SNAKE_CASE : int = '''retribert'''
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-12 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1_28 , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = share_encoders
lowerCAmelCase = projection_dim
| 713
|
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
_UpperCamelCase : List[Any] = "\\n\n"
_UpperCamelCase : List[Any] = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
_UpperCamelCase : Dict = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 16 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE=None ):
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
lowerCAmelCase = 'cuda'
else:
lowerCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
lowerCAmelCase = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = model.to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
lowerCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(_SCREAMING_SNAKE_CASE ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
lowerCAmelCase = model.config.max_length - 1
else:
lowerCAmelCase = model.config.max_length
lowerCAmelCase = tokenizer(
_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , return_tensors='pt' , return_attention_mask=_SCREAMING_SNAKE_CASE , ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = encodings['input_ids']
lowerCAmelCase = encodings['attention_mask']
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
lowerCAmelCase = []
lowerCAmelCase = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase = min(start_index + batch_size , len(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = encoded_texts[start_index:end_index]
lowerCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
lowerCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(_SCREAMING_SNAKE_CASE )
lowerCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
lowerCAmelCase = torch.cat(
                    [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(_SCREAMING_SNAKE_CASE ), attn_mask] , dim=1 )
lowerCAmelCase = encoded_batch
with torch.no_grad():
lowerCAmelCase = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ).logits
lowerCAmelCase = out_logits[..., :-1, :].contiguous()
lowerCAmelCase = labels[..., 1:].contiguous()
lowerCAmelCase = attn_mask[..., 1:].contiguous()
            lowerCAmelCase = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , _SCREAMING_SNAKE_CASE ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(_SCREAMING_SNAKE_CASE )}
| 514
| 0
|
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class _lowerCAmelCase( unittest.TestCase ):
"""simple docstring"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=7 , _lowerCamelCase=3 , _lowerCamelCase=3_0 , _lowerCamelCase=4_0_0 , _lowerCamelCase=True , _lowerCamelCase=None , _lowerCamelCase=True , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=[0.5, 0.5, 0.5] , _lowerCamelCase=True , _lowerCamelCase=1 / 2_5_5 , _lowerCamelCase=True , ):
UpperCamelCase_: List[str] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
UpperCamelCase_: Union[str, Any] = parent
UpperCamelCase_: int = batch_size
UpperCamelCase_: Optional[Any] = num_channels
UpperCamelCase_: str = min_resolution
UpperCamelCase_: Union[str, Any] = max_resolution
UpperCamelCase_: str = do_resize
UpperCamelCase_: Dict = size
UpperCamelCase_: List[Any] = do_normalize
UpperCamelCase_: Union[str, Any] = image_mean
UpperCamelCase_: Tuple = image_std
UpperCamelCase_: Any = do_rescale
UpperCamelCase_: int = rescale_factor
UpperCamelCase_: List[Any] = do_pad
def _a ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _a ( self , _lowerCamelCase , _lowerCamelCase=False ):
if not batched:
UpperCamelCase_: List[str] = image_inputs[0]
if isinstance(_lowerCamelCase , Image.Image ):
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = image.size
else:
UpperCamelCase_ ,UpperCamelCase_: List[str] = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase_: List[str] = int(self.size['shortest_edge'] * h / w )
UpperCamelCase_: Dict = self.size['shortest_edge']
elif w > h:
UpperCamelCase_: str = self.size['shortest_edge']
UpperCamelCase_: Dict = int(self.size['shortest_edge'] * w / h )
else:
UpperCamelCase_: List[str] = self.size['shortest_edge']
UpperCamelCase_: Tuple = self.size['shortest_edge']
else:
UpperCamelCase_: List[Any] = []
for image in image_inputs:
UpperCamelCase_ ,UpperCamelCase_: Optional[int] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase_: Tuple = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[0] )[0]
UpperCamelCase_: int = max(_lowerCamelCase , key=lambda _lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a : Optional[Any] =ConditionalDetrImageProcessor if is_vision_available() else None
def _a ( self ):
UpperCamelCase_: List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def _a ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ):
UpperCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_lowerCamelCase , 'image_mean' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'image_std' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_normalize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'do_resize' ) )
self.assertTrue(hasattr(_lowerCamelCase , 'size' ) )
def _a ( self ):
UpperCamelCase_: List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
self.assertEqual(image_processor.do_pad , _lowerCamelCase )
UpperCamelCase_: Optional[int] = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=_lowerCamelCase )
self.assertEqual(image_processor.size , {'shortest_edge': 4_2, 'longest_edge': 8_4} )
self.assertEqual(image_processor.do_pad , _lowerCamelCase )
def _a ( self ):
pass
def _a ( self ):
UpperCamelCase_: int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase_: List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , Image.Image )
# Test not batched input
UpperCamelCase_: Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: str = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_ ,UpperCamelCase_: str = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
UpperCamelCase_: Tuple = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ):
UpperCamelCase_: Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , numpify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , np.ndarray )
# Test not batched input
UpperCamelCase_: Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Dict = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: List[str] = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _a ( self ):
UpperCamelCase_: Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase_: Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowerCamelCase , torchify=_lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(_lowerCamelCase , torch.Tensor )
# Test not batched input
UpperCamelCase_: str = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: List[Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase_: Tuple = image_processing(_lowerCamelCase , return_tensors='pt' ).pixel_values
UpperCamelCase_ ,UpperCamelCase_: Union[str, Any] = self.image_processor_tester.get_expected_values(_lowerCamelCase , batched=_lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _a ( self ):
UpperCamelCase_: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
UpperCamelCase_: List[str] = json.loads(f.read() )
UpperCamelCase_: Optional[int] = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
UpperCamelCase_: int = ConditionalDetrImageProcessor.from_pretrained('microsoft/conditional-detr-resnet-50' )
UpperCamelCase_: Union[str, Any] = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , return_tensors='pt' )
# verify pixel values
UpperCamelCase_: Any = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , _lowerCamelCase )
UpperCamelCase_: Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_: Tuple = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCamelCase ) )
# verify boxes
UpperCamelCase_: str = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_: Optional[int] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCamelCase ) )
# verify is_crowd
UpperCamelCase_: Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCamelCase ) )
# verify class_labels
UpperCamelCase_: Tuple = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCamelCase ) )
# verify orig_size
UpperCamelCase_: Any = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCamelCase ) )
# verify size
UpperCamelCase_: List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCamelCase ) )
@slow
def _a ( self ):
UpperCamelCase_: str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
UpperCamelCase_: int = json.loads(f.read() )
UpperCamelCase_: List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
UpperCamelCase_: List[Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
UpperCamelCase_: Any = ConditionalDetrImageProcessor(format='coco_panoptic' )
UpperCamelCase_: int = image_processing(images=_lowerCamelCase , annotations=_lowerCamelCase , masks_path=_lowerCamelCase , return_tensors='pt' )
# verify pixel values
UpperCamelCase_: List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding['pixel_values'].shape , _lowerCamelCase )
UpperCamelCase_: Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _lowerCamelCase , atol=1e-4 ) )
# verify area
UpperCamelCase_: Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _lowerCamelCase ) )
# verify boxes
UpperCamelCase_: int = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _lowerCamelCase )
UpperCamelCase_: List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _lowerCamelCase , atol=1e-3 ) )
# verify image_id
UpperCamelCase_: List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _lowerCamelCase ) )
# verify is_crowd
UpperCamelCase_: List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _lowerCamelCase ) )
# verify class_labels
UpperCamelCase_: Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _lowerCamelCase ) )
# verify masks
UpperCamelCase_: Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _lowerCamelCase )
# verify orig_size
UpperCamelCase_: int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _lowerCamelCase ) )
# verify size
UpperCamelCase_: str = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _lowerCamelCase ) )
| 57
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 551
| 0
|
from collections.abc import Generator
def __magic_name__ ( ) -> Generator[int, None, None]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = 0, 1
while True:
UpperCamelCase , UpperCamelCase = b, a + b
yield b
def __magic_name__ ( lowercase_ = 1000 ) -> int:
'''simple docstring'''
UpperCamelCase = 1
UpperCamelCase = fibonacci_generator()
while len(str(next(lowercase_ ) ) ) < n:
answer += 1
return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
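# This is Project Euler problem 25: the index of the first Fibonacci term to
# contain 1000 digits. solution(1000) (the default) should return the
# published answer, 4782.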
| 414
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__a : Union[str, Any] = logging.get_logger(__name__)
def __magic_name__ ( lowercase_ ) -> Dict:
'''simple docstring'''
UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )
if "model" in sd.keys():
UpperCamelCase = torch.load(lowercase_ , map_location="cpu" )["model"]
# pop unnecessary weights
UpperCamelCase = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(lowercase_ )
UpperCamelCase = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase = sd.pop(lowercase_ )
UpperCamelCase = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCamelCase = sd[key]
# We split QKV in separate Q,K,V
UpperCamelCase = key.replace(".qkv_proj." , ".q_proj." )
UpperCamelCase = key.replace(".qkv_proj." , ".k_proj." )
UpperCamelCase = key.replace(".qkv_proj." , ".v_proj." )
UpperCamelCase = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCamelCase , UpperCamelCase , UpperCamelCase = torch.split(lowercase_ , depth // 3 , dim=0 )
UpperCamelCase = q
UpperCamelCase = k
UpperCamelCase = v
del sd[key]
return sd
@torch.no_grad()
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None ) -> str:
'''simple docstring'''
UpperCamelCase = load_checkpoint(lowercase_ )
if config is not None:
UpperCamelCase = OPTConfig.from_pretrained(lowercase_ )
else:
UpperCamelCase = OPTConfig()
UpperCamelCase = OPTModel(lowercase_ ).half().eval()
model.load_state_dict(lowercase_ )
# Check results
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
if __name__ == "__main__":
__a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__a : Dict = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
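# A hypothetical invocation (script name and paths are placeholders, not
# taken from this file):
#   python convert_opt_to_hf.py --fairseq_path ./opt-125m/model.pt \
#       --pytorch_dump_folder_path ./opt-125m-hf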
| 414
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( __snake_case : Tuple , __snake_case : Union[str, Any] , __snake_case : List[Any] ):
"""simple docstring"""
_lowerCamelCase : Tuple = LxmertConfig.from_json_file(__snake_case )
print(F'Building PyTorch model from configuration: {config}' )
_lowerCamelCase : Any = LxmertForPreTraining(__snake_case )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(__snake_case , __snake_case , __snake_case )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , __snake_case )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
UpperCAmelCase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 88
|
"""simple docstring"""
from math import isqrt, log2
def _snake_case ( __snake_case : int ):
"""simple docstring"""
_lowerCamelCase : List[str] = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __snake_case , __snake_case ):
_lowerCamelCase : Optional[int] = False
return [i for i in range(2 , __snake_case ) if is_prime[i]]
def _snake_case ( __snake_case : int = 800800 , __snake_case : int = 800800 ):
"""simple docstring"""
    _lowerCamelCase : Union[str, Any] = degree * log2(__snake_case )
_lowerCamelCase : Union[str, Any] = int(__snake_case )
_lowerCamelCase : Dict = calculate_prime_numbers(__snake_case )
_lowerCamelCase : Optional[int] = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : Any = len(__snake_case ) - 1
while left < right:
while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
| 88
| 1
|
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
SCREAMING_SNAKE_CASE__ = """base_with_context"""
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Dict , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
lowercase_ = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase_ = weights[F'layers_{lyr_num}']
lowercase_ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase_ = ly_weight["attention"]
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: Tuple ):
'''simple docstring'''
lowercase_ = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__lowerCamelCase )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase_ = weights[F'layers_{lyr_num}']
lowercase_ = ly_weight["attention"]
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase_ = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] , __lowerCamelCase: List[str] ):
'''simple docstring'''
lowercase_ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=__lowerCamelCase )
lowercase_ = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowercase_ = weights[F'layers_{lyr_num}']
lowercase_ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowercase_ = ly_weight["self_attention"]
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase_ = ly_weight["MultiHeadDotProductAttention_0"]
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase_ = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase_ = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowercase_ = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Optional[Any] ):
'''simple docstring'''
lowercase_ = checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowercase_ = jnp.tree_util.tree_map(onp.array , __lowerCamelCase )
lowercase_ = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
lowercase_ = os.path.join(args.checkpoint_path , ".." , "config.gin" )
lowercase_ = inference.parse_training_gin_file(__lowerCamelCase , __lowerCamelCase )
lowercase_ = inference.InferenceModel(args.checkpoint_path , __lowerCamelCase )
lowercase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
lowercase_ = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowercase_ = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowercase_ = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowercase_ = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , __lowerCamelCase )
lowercase_ = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , __lowerCamelCase )
lowercase_ = load_decoder(ta_checkpoint["target"]["decoder"] , __lowerCamelCase )
lowercase_ = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
lowercase_ = SpectrogramDiffusionPipeline(
notes_encoder=__lowerCamelCase , continuous_encoder=__lowerCamelCase , decoder=__lowerCamelCase , scheduler=__lowerCamelCase , melgan=__lowerCamelCase , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=lambda x: str(x).lower() in ("""1""", """true""", """yes"""), required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
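# Hedged note on the --save flag above: argparse's type=bool is a well-known
# pitfall, since bool() of any non-empty string (including "False") is True:
#     assert bool("False") is True
# The string-to-bool lambda used above is one conventional workaround; another
# option is argparse.BooleanOptionalAction (Python 3.9+).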
| 713
|
# Using depth-first search (DFS) to find the Eulerian path traversal
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: Tuple , __lowerCamelCase: List[Any] , __lowerCamelCase: Union[str, Any]=None ):
'''simple docstring'''
lowercase_ = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
lowercase_ , lowercase_ = True, True
lowercase_ = dfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
return path
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Any , __lowerCamelCase: Union[str, Any] ):
'''simple docstring'''
lowercase_ = 0
lowercase_ = -1
for i in range(__lowerCamelCase + 1 ):  # include max_node itself in the degree scan
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
lowercase_ = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def SCREAMING_SNAKE_CASE_ ( __lowerCamelCase: Union[str, Any] , __lowerCamelCase: List[str] ):
'''simple docstring'''
lowercase_ = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
lowercase_ , lowercase_ = check_circuit_or_path(__lowerCamelCase , __lowerCamelCase )
if check == 3:
print("graph is not Eulerian" )
print("no path" )
return
lowercase_ = 1
if check == 2:
lowercase_ = odd_node
print("graph has a Euler path" )
if check == 1:
print("graph has a Euler cycle" )
lowercase_ = dfs(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
print(__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( ):
'''simple docstring'''
lowercase_ = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
lowercase_ = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
lowercase_ = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
lowercase_ = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
lowercase_ = {
1: [],
2: []
# all degrees are zero
}
lowercase_ = 10
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
check_euler(__lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
main()
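# Hedged sketch (a readable restatement, not part of the original sample): for a
# connected undirected graph, an Euler cycle exists iff every vertex has even
# degree, and an Euler path iff exactly two vertices have odd degree, which is
# the same criterion check_circuit_or_path applies above.
def euler_kind(graph: dict) -> str:
    odd = [v for v, nbrs in graph.items() if len(nbrs) % 2 == 1]
    if not odd:
        return "cycle"
    if len(odd) == 2:
        return "path"
    return "none"

assert euler_kind({1: [2, 3], 2: [1, 3], 3: [1, 2]}) == "cycle"  # triangle
assert euler_kind({1: [2], 2: [1, 3], 3: [2]}) == "path"  # simple path 1-2-3
assert euler_kind({1: [2, 3, 4], 2: [1], 3: [1], 4: [1]}) == "none"  # star K_{1,3}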
| 601
| 0
|
"""simple docstring"""
a = 256
# Modulus to hash a string
a = 1_000_003
def _snake_case ( _snake_case : str , _snake_case : str ) -> bool:
'''simple docstring'''
_A = len(_snake_case )
_A = len(_snake_case )
if p_len > t_len:
return False
_A = 0
_A = 0
_A = 1
# Calculating the hash of pattern and substring of text
for i in range(_snake_case ):
_A = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_A = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_A = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
_A = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _snake_case ( ) -> None:
'''simple docstring'''
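# Test 1)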
_A = 'abc1abc12'
_A = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
_A = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(_snake_case , _snake_case ) and not rabin_karp(_snake_case , _snake_case )
# Test 2)
_A = 'ABABX'
_A = 'ABABZABABYABABX'
assert rabin_karp(_snake_case , _snake_case )
# Test 3)
_A = 'AAAB'
_A = 'ABAAAAAB'
assert rabin_karp(_snake_case , _snake_case )
# Test 4)
_A = 'abcdabcy'
_A = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(_snake_case , _snake_case )
# Test 5)
_A = 'Lü'
_A = 'Lüsai'
assert rabin_karp(_snake_case , _snake_case )
_A = 'Lue'
assert not rabin_karp(_snake_case , _snake_case )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
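# Hedged sketch of the rolling-hash update used in rabin_karp above: drop the
# leading character, then append the next one, in O(1) per shift of the window.
alphabet_size, modulus = 256, 1_000_003

def poly_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (h * alphabet_size + ord(ch)) % modulus
    return h

window_hash = poly_hash("abc")
power = pow(alphabet_size, 2, modulus)  # alphabet_size ** (window_length - 1)
rolled = ((window_hash - ord("a") * power) * alphabet_size + ord("d")) % modulus
assert rolled == poly_hash("bcd")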
| 7
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Any = ['''image_processor''', '''tokenizer''']
UpperCAmelCase : Optional[int] = '''ViTImageProcessor'''
UpperCAmelCase : int = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Tuple , _UpperCAmelCase : int=None , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : Dict ):
_A = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _UpperCAmelCase , )
_A = kwargs.pop('feature_extractor' )
_A = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__( self : Optional[Any] , _UpperCAmelCase : int=None , _UpperCAmelCase : int=None , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : Union[str, Any] ):
if text is None and visual_prompt is None and images is None:
raise ValueError('You have to specify either text, visual prompt or images.' )
if text is not None and visual_prompt is not None:
raise ValueError('You have to specify exactly one type of prompt. Either text or visual prompt.' )
if text is not None:
_A = self.tokenizer(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if images is not None:
_A = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if visual_prompt is not None and images is not None:
_A = {
'pixel_values': image_features.pixel_values,
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
_A = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
_A = {
'conditional_pixel_values': prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def lowerCAmelCase_ ( self : str , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def lowerCAmelCase_ ( self : Union[str, Any] , *_UpperCAmelCase : List[str] , **_UpperCAmelCase : Union[str, Any] ):
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def lowerCAmelCase_ ( self : Dict ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _UpperCAmelCase , )
return self.image_processor_class
@property
def lowerCAmelCase_ ( self : Tuple ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _UpperCAmelCase , )
return self.image_processor
| 7
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Any = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : str = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Dict = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 705
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__UpperCAmelCase = {
'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
'ErnieForCausalLM',
'ErnieForMaskedLM',
'ErnieForMultipleChoice',
'ErnieForNextSentencePrediction',
'ErnieForPreTraining',
'ErnieForQuestionAnswering',
'ErnieForSequenceClassification',
'ErnieForTokenClassification',
'ErnieModel',
'ErniePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 503
| 0
|
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a = "▁"
a = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : str = BertGenerationTokenizer
__UpperCamelCase : Tuple = False
__UpperCamelCase : int = True
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
super().setUp()
__SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowerCamelCase ,keep_accents=lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """<s>"""
__SCREAMING_SNAKE_CASE = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase ) ,lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"""<unk>""" )
self.assertEqual(vocab_keys[1] ,"""<s>""" )
self.assertEqual(vocab_keys[-1] ,"""<pad>""" )
self.assertEqual(len(lowerCamelCase ) ,1002 )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1000 )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BertGenerationTokenizer(lowerCamelCase ,keep_accents=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[285, 46, 10, 170, 382] ,)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
@cached_property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return BertGenerationTokenizer.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """Hello World!"""
__SCREAMING_SNAKE_CASE = [1_8536, 2260, 101]
self.assertListEqual(lowerCamelCase ,self.big_tokenizer.encode(lowerCamelCase ) )
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"""
)
__SCREAMING_SNAKE_CASE = [
871,
419,
358,
946,
991,
2521,
452,
358,
1357,
387,
7751,
3536,
112,
985,
456,
126,
865,
938,
5400,
5734,
458,
1368,
467,
786,
2462,
5246,
1159,
633,
865,
4519,
457,
582,
852,
2557,
427,
916,
508,
405,
3_4324,
497,
391,
408,
1_1342,
1244,
385,
100,
938,
985,
456,
574,
362,
1_2597,
3200,
3129,
1172,
]
self.assertListEqual(lowerCamelCase ,self.big_tokenizer.encode(lowerCamelCase ) )
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
__SCREAMING_SNAKE_CASE = list(self.big_tokenizer.get_vocab().keys() )[:10]
__SCREAMING_SNAKE_CASE = """ """.join(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.big_tokenizer.encode_plus(lowerCamelCase ,return_tensors="""pt""" ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.big_tokenizer.batch_encode_plus(
[sequence + """ """ + sequence] ,return_tensors="""pt""" ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = BertGenerationConfig()
__SCREAMING_SNAKE_CASE = BertGenerationEncoder(lowerCamelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase )
model(**lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
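# fmt: off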
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114], [448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase ,model_name="""google/bert_for_seq_generation_L-24_bbc_encoder""" ,revision="""c817d1fd1be2ffa69431227a1fe320544943d4db""" ,)
| 109
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __a ( _snake_case ):
__UpperCamelCase : Any = ''
__UpperCamelCase : int = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Any ,lowerCamelCase : Optional[DatasetInfo] = None ,lowerCamelCase : Optional[str] = None ,**lowerCamelCase : Dict ,):
'''simple docstring'''
super().__init__(self ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = repo_info
__SCREAMING_SNAKE_CASE = token
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
if self.dir_cache is None:
__SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__SCREAMING_SNAKE_CASE = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(lowerCamelCase ): {"""name""": str(lowerCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : str = "rb" ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
if not isinstance(self.repo_info ,lowerCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id ,lowerCamelCase ,revision=self.repo_info.sha )
return fsspec.open(
lowerCamelCase ,mode=lowerCamelCase ,headers=get_authentication_headers_for_url(lowerCamelCase ,use_auth_token=self.token ) ,client_kwargs={"""trust_env""": True} ,).open()
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Any ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
self._get_dirs()
__SCREAMING_SNAKE_CASE = self._strip_protocol(lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCamelCase )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Any ,lowerCamelCase : str=False ,**lowerCamelCase : Any ):
'''simple docstring'''
self._get_dirs()
__SCREAMING_SNAKE_CASE = PurePosixPath(path.strip("""/""" ) )
__SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
__SCREAMING_SNAKE_CASE = PurePosixPath(p.strip("""/""" ) )
__SCREAMING_SNAKE_CASE = p.parent
if root == path:
__SCREAMING_SNAKE_CASE = f
__SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
| 109
| 1
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def snake_case (A_ :Optional[int] , A_ :int , A_ :Dict , A_ :Any , ):
'''simple docstring'''
a : List[Any] = coefficient_matrix.shape
a : Any = constant_matrix.shape
if rowsa != colsa:
a : int = f'''Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'''
raise ValueError(A_ )
if colsa != 1:
a : Tuple = f'''Constant matrix must be nx1 but received {rowsa}x{colsa}'''
raise ValueError(A_ )
if rowsa != rowsa:
a : Tuple = (
"Coefficient and constant matrices dimensions must be nxn and nx1 but "
f'''received {rowsa}x{colsa} and {rowsa}x{colsa}'''
)
raise ValueError(A_ )
if len(A_ ) != rowsa:
a : List[str] = (
"Number of initial values must be equal to number of rows in coefficient "
f'''matrix but received {len(A_ )} and {rowsa}'''
)
raise ValueError(A_ )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
a : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
a : Tuple = table.shape
strictly_diagonally_dominant(A_ )
# Iterate over the whole matrix for the given number of times
for _ in range(A_ ):
a : str = []
for row in range(A_ ):
a : Optional[int] = 0
for col in range(A_ ):
if col == row:
a : Optional[Any] = table[row][col]
elif col == cols - 1:
a : Optional[Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
a : List[str] = (temp + val) / denom
new_val.append(A_ )
a : Optional[Any] = new_val
return [float(A_ ) for i in new_val]
def snake_case (A_ :Union[str, Any] ):
'''simple docstring'''
a : Optional[int] = table.shape
a : Optional[Any] = True
for i in range(0 , A_ ):
a : List[Any] = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
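# Hedged sketch (a compact reimplementation with readable names, not the exact
# function above): each Jacobi sweep computes x_i = (b_i - sum_{j != i} A_ij * x_j) / A_ii,
# which converges when A is strictly diagonally dominant.
import numpy as np

def jacobi_sweeps(A, b, x0, iterations):
    x = np.asarray(x0, dtype=float)
    D = np.diag(A)  # diagonal entries A_ii
    R = A - np.diagflat(D)  # off-diagonal remainder
    for _ in range(iterations):
        x = (b - R @ x) / D
    return x

A = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
b = np.array([2.0, -6.0, -4.0])
x = jacobi_sweeps(A, b, x0=[0.0, 0.0, 0.0], iterations=100)
assert np.allclose(A @ x, b, atol=1e-8)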
| 719
|
"""simple docstring"""
_UpperCamelCase : Tuple = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
_UpperCamelCase : str = [{'type': 'code', 'content': INSTALL_CONTENT}]
_UpperCamelCase : Tuple = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 118
| 0
|
"""simple docstring"""
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
if n == 1 or not isinstance(__UpperCamelCase , __UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
snake_case_ : Union[str, Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __lowerCAmelCase ( __UpperCamelCase : int ):
'''simple docstring'''
snake_case_ : Dict = 0
snake_case_ : Optional[Any] = 2
while digits < n:
index += 1
snake_case_ : Any = len(str(fibonacci(__UpperCamelCase ) ) )
return index
def __lowerCAmelCase ( __UpperCamelCase : int = 1_0_0_0 ):
'''simple docstring'''
return fibonacci_digits_index(__UpperCamelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
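# Hedged worked example for the digit-index search above: the first Fibonacci
# number with three digits is F(12) = 144 (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89,
# 144), and the known Project Euler 25 answer (first term with 1000 digits) is 4782.
def first_index_with_digits(n: int) -> int:
    a, b, index = 1, 1, 2  # F(1) = F(2) = 1
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index

assert first_index_with_digits(3) == 12
assert first_index_with_digits(1000) == 4782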
| 58
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A:
'''simple docstring'''
def __init__( self : List[Any] , A_ : Dict , A_ : Union[str, Any]=13 , A_ : List[Any]=30 , A_ : Optional[Any]=2 , A_ : List[str]=3 , A_ : List[str]=True , A_ : Dict=True , A_ : List[Any]=32 , A_ : Any=2 , A_ : Any=4 , A_ : Optional[int]=37 , A_ : Dict="gelu" , A_ : List[Any]=0.1 , A_ : Optional[int]=0.1 , A_ : Union[str, Any]=10 , A_ : Optional[Any]=0.02 , A_ : List[Any]=3 , A_ : str=None , ) -> str:
"""simple docstring"""
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = image_size
lowerCamelCase_ = patch_size
lowerCamelCase_ = num_channels
lowerCamelCase_ = is_training
lowerCamelCase_ = use_labels
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCamelCase_ = (image_size // patch_size) ** 2
lowerCamelCase_ = num_patches + 1
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = self.get_config()
return config, pixel_values, labels
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , )
def a__ ( self : Any , A_ : int , A_ : int , A_ : int ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModel(config=A_ )
lowerCamelCase_ = model(A_ , training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image of a different size than the one specified in the config.
lowerCamelCase_ = self.image_size // 2
lowerCamelCase_ = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase_ = model(A_ , interpolate_pos_encoding=A_ , training=A_ )
lowerCamelCase_ = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def a__ ( self : List[Any] , A_ : List[Any] , A_ : Any , A_ : Any ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ = self.type_sequence_label_size
lowerCamelCase_ = TFViTForImageClassification(A_ )
lowerCamelCase_ = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image of a different size than the one specified in the config.
lowerCamelCase_ = self.image_size // 2
lowerCamelCase_ = pixel_values[:, :, :image_size, :image_size]
lowerCamelCase_ = model(A_ , interpolate_pos_encoding=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ = 1
lowerCamelCase_ = TFViTForImageClassification(A_ )
lowerCamelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ = model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = self.prepare_config_and_inputs()
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = config_and_inputs
lowerCamelCase_ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCamelCase = (
{'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def a__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
pass
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
lowerCamelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer ) )
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ = model_class(A_ )
lowerCamelCase_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ = [*signature.parameters.keys()]
lowerCamelCase_ = ['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def a__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase_ = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
lowerCamelCase_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A( unittest.TestCase ):
'''simple docstring'''
@cached_property
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
lowerCamelCase_ = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
lowerCamelCase_ = self.default_image_processor
lowerCamelCase_ = prepare_img()
lowerCamelCase_ = image_processor(images=A_ , return_tensors='tf' )
# forward pass
lowerCamelCase_ = model(**A_ )
# verify the logits
lowerCamelCase_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
lowerCamelCase_ = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , A_ , atol=1E-4 )
| 70
| 0
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class lowerCAmelCase_ :
def __init__( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = False ) -> int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = scheduler
_UpperCAmelCase : Optional[int] = optimizers if isinstance(UpperCAmelCase_ , (list, tuple) ) else [optimizers]
_UpperCAmelCase : Dict = split_batches
_UpperCAmelCase : Union[str, Any] = step_with_optimizer
_UpperCAmelCase : int = GradientState()
def a_ ( self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict ) -> Tuple:
'''simple docstring'''
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
_UpperCAmelCase : Union[str, Any] = AcceleratorState().num_processes
for _ in range(UpperCAmelCase_ ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ )
else:
self.scheduler.step(*UpperCAmelCase_ , **UpperCAmelCase_ )
def a_ ( self : Tuple ) -> Any:
'''simple docstring'''
return self.scheduler.get_last_lr()
def a_ ( self : List[Any] ) -> Any:
'''simple docstring'''
return self.scheduler.state_dict()
def a_ ( self : Dict , UpperCAmelCase_ : Any ) -> Optional[int]:
'''simple docstring'''
self.scheduler.load_state_dict(UpperCAmelCase_ )
def a_ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
return self.scheduler.get_lr()
def a_ ( self : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int] ) -> str:
'''simple docstring'''
return self.scheduler.print_lr(*UpperCAmelCase_ , **UpperCAmelCase_ )
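# Hedged sketch (a stand-alone model of the control flow above, with hypothetical
# stub classes): the wrapper only advances the LR schedule when gradients were
# actually synchronized, so gradient accumulation does not shrink the schedule.
class _StubScheduler:
    def __init__(self):
        self.steps = 0

    def step(self):
        self.steps += 1

def step_if_synced(scheduler, sync_gradients):
    if not sync_gradients:  # mirrors the gradient_state.sync_gradients guard
        return
    scheduler.step()

sched = _StubScheduler()
for micro_step in range(8):  # accumulate 4 micro-batches per optimizer step
    step_if_synced(sched, sync_gradients=(micro_step % 4 == 3))
assert sched.steps == 2  # the schedule advanced once per optimizer step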
| 416
|
from typing import List
from .keymap import KEYMAP, get_character
def _A ( _UpperCamelCase ):
def decorator(_UpperCamelCase ):
_UpperCAmelCase : Optional[int] = getattr(_UpperCamelCase , '''handle_key''' , [] )
handle += [key]
setattr(_UpperCamelCase , '''handle_key''' , _UpperCamelCase )
return func
return decorator
def _A ( *_UpperCamelCase ):
def decorator(_UpperCamelCase ):
_UpperCAmelCase : Any = getattr(_UpperCamelCase , '''handle_key''' , [] )
handle += keys
setattr(_UpperCamelCase , '''handle_key''' , _UpperCamelCase )
return func
return decorator
class lowerCAmelCase_ ( lowercase_ ):
def __new__( cls : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = super().__new__(cls , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if not hasattr(UpperCAmelCase_ , '''key_handler''' ):
setattr(UpperCAmelCase_ , '''key_handler''' , {} )
setattr(UpperCAmelCase_ , '''handle_input''' , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase : List[str] = getattr(UpperCAmelCase_ , '''handle_key''' , [] )
for key in handled_keys:
_UpperCAmelCase : Optional[Any] = value
return new_cls
@staticmethod
def a_ ( cls : Optional[Any] ) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase : str = ord(UpperCAmelCase_ )
_UpperCAmelCase : str = cls.key_handler.get(UpperCAmelCase_ )
if handler:
_UpperCAmelCase : Optional[int] = char
return handler(cls )
else:
return None
def _A ( cls ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
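# Hedged usage sketch (hypothetical and commented out: the decorator and
# metaclass names above are anonymized, but they follow accelerate's menu-input
# pattern, where the pieces are conventionally named mark, mark_multiple,
# KeyHandler and register). A class built through them routes a key press to
# the method marked with that key:
#
#     class Menu(metaclass=KeyHandler):
#         @mark(KEYMAP["q"])  # assumes KEYMAP has an entry for the letter q
#         def quit(cls):
#             return "quit"
#
#     Menu.handle_input()  # pressing "q" dispatches to Menu.quit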
| 416
| 1
|
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__lowercase = 'hf-internal-testing/tiny-random-t5'
__lowercase = AutoTokenizer.from_pretrained(__lowerCamelCase )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
__lowercase = tokenizer('This is me' , return_tensors='pt' )
__lowercase = model.to_bettertransformer()
self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__lowercase = model.generate(**__lowerCamelCase )
__lowercase = model.reverse_bettertransformer()
self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowerCamelCase )
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
self.assertFalse(
any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__lowercase = model_reloaded.generate(**__lowerCamelCase )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase ) )
def UpperCAmelCase ( self : str ) -> List[str]:
'''simple docstring'''
__lowercase = 'hf-internal-testing/tiny-random-t5'
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(__lowerCamelCase )
__lowercase = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(__lowerCamelCase ):
model.save_pretrained(__lowerCamelCase )
__lowercase = model.reverse_bettertransformer()
model.save_pretrained(__lowerCamelCase )
| 375
|
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case ) -> int:
# BFS: return True if the sink t is still reachable from the source s in the residual graph.
__lowercase = [False] * len(snake_case )
__lowercase = []
queue.append(snake_case )
__lowercase = True
while queue:
__lowercase = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(snake_case )
__lowercase = True
__lowercase = u
return visited[t]
def SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case ) -> Dict:
# The parent array is filled by BFS to reconstruct the augmenting path
__lowercase = [-1] * (len(snake_case ))
__lowercase = 0
while bfs(snake_case , snake_case , snake_case , snake_case ):
__lowercase = float('Inf' )
__lowercase = sink
while s != source:
# Find the minimum residual capacity along the selected path
__lowercase = min(snake_case , graph[parent[s]][s] )
__lowercase = parent[s]
max_flow += path_flow
__lowercase = sink
while v != source:
__lowercase = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
__lowercase = parent[v]
return max_flow
SCREAMING_SNAKE_CASE_ : Any = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ : Optional[Any] = 0, 5
print(ford_fulkerson(graph, source, sink))
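# Hedged, self-contained check (assuming the matrix above is the classic CLRS
# flow network): its 0 -> 5 maximum flow is 23. A minimal Edmonds-Karp with
# readable names, since the functions above use anonymized identifiers:
from collections import deque

def max_flow(capacity, source_node, sink_node):
    cap = [row[:] for row in capacity]  # copy: augmenting updates the residuals
    n, flow = len(cap), 0
    while True:
        parent = [-1] * n
        parent[source_node] = source_node
        q = deque([source_node])
        while q and parent[sink_node] == -1:
            u = q.popleft()
            for v in range(n):
                if parent[v] == -1 and cap[u][v] > 0:
                    parent[v] = u
                    q.append(v)
        if parent[sink_node] == -1:  # no augmenting path left
            return flow
        bottleneck, v = float("inf"), sink_node
        while v != source_node:
            bottleneck = min(bottleneck, cap[parent[v]][v])
            v = parent[v]
        v = sink_node
        while v != source_node:
            cap[parent[v]][v] -= bottleneck
            cap[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

example = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
assert max_flow(example, 0, 5) == 23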
| 375
| 1
|
class _a( __A ):
pass
class _a( __A ):
pass
class _a:
def __init__( self ) -> Tuple:
'''simple docstring'''
_snake_case : Tuple = [
[],
[],
[],
]
def lowercase ( self , __snake_case , __snake_case ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 1_0_0:
raise OverflowError("Maximum queue size is 100" )
self.queues[priority].append(__UpperCamelCase )
except IndexError:
raise ValueError("Valid priorities are 0, 1, and 2" )
def lowercase ( self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("All queues are empty" )
def __str__( self ) -> str:
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class _a:
def __init__( self ) -> Optional[Any]:
'''simple docstring'''
_snake_case : Optional[int] = []
def lowercase ( self , __snake_case ) -> None:
'''simple docstring'''
if len(self.queue ) == 1_0_0:
raise OverFlowError("Maximum queue size is 100" )
self.queue.append(__UpperCamelCase )
def lowercase ( self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError("The queue is empty" )
else:
_snake_case : int = min(self.queue )
self.queue.remove(__UpperCamelCase )
return data
def __str__( self ) -> str:
'''simple docstring'''
return str(self.queue )
def A ( ):
_snake_case : Any = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(_SCREAMING_SNAKE_CASE )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_SCREAMING_SNAKE_CASE )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def A ( ):
_snake_case : Dict = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_SCREAMING_SNAKE_CASE )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_SCREAMING_SNAKE_CASE )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 707
|
import re
def A ( UpperCAmelCase ):
_snake_case : Any = re.compile(R"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
if match := re.search(UpperCAmelCase , UpperCAmelCase ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
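# Hedged breakdown of the pattern above: an optional "+91" prefix (followed by
# an optional hyphen or space), an optional leading "0" or "91", then ten
# digits starting with 7, 8 or 9.
import re

_pattern = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
assert _pattern.match("+918827897895")
assert _pattern.match("9821012345")
assert not _pattern.match("1234567890")  # must start with 7, 8 or 9
assert not _pattern.match("982101234")  # one digit short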
| 278
| 0
|
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case( lowerCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase : Dict = (DDPMScheduler,)
def __snake_case ( self , **A_ ) -> Union[str, Any]:
lowerCAmelCase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**_lowerCamelCase )
return config
def __snake_case ( self ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCamelCase )
def __snake_case ( self ) -> str:
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase )
def __snake_case ( self ) -> Union[str, Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCamelCase )
def __snake_case ( self ) -> Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_lowerCamelCase )
def __snake_case ( self ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCamelCase )
def __snake_case ( self ) -> Optional[Any]:
self.check_over_configs(thresholding=_lowerCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCamelCase , prediction_type=_lowerCamelCase , sample_max_value=_lowerCamelCase , )
def __snake_case ( self ) -> Optional[int]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCamelCase )
def __snake_case ( self ) -> Any:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_lowerCamelCase )
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
lowerCAmelCase = len(_lowerCamelCase )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
lowerCAmelCase = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(_lowerCamelCase ) )
lowerCAmelCase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
lowerCAmelCase = len(_lowerCamelCase )
lowerCAmelCase = self.dummy_model()
lowerCAmelCase = self.dummy_sample_deter
lowerCAmelCase = torch.manual_seed(0 )
for t in reversed(range(_lowerCamelCase ) ):
# 1. predict noise residual
lowerCAmelCase = model(_lowerCamelCase , _lowerCamelCase )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCAmelCase = pred_prev_sample
lowerCAmelCase = torch.sum(torch.abs(_lowerCamelCase ) )
lowerCAmelCase = torch.mean(torch.abs(_lowerCamelCase ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
lowerCAmelCase = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_lowerCamelCase )
lowerCAmelCase = scheduler.timesteps
for i, timestep in enumerate(_lowerCamelCase ):
if i == len(_lowerCamelCase ) - 1:
lowerCAmelCase = -1
else:
lowerCAmelCase = timesteps[i + 1]
lowerCAmelCase = scheduler.previous_timestep(_lowerCamelCase )
lowerCAmelCase = prev_t.item()
self.assertEqual(_lowerCamelCase , _lowerCamelCase )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
lowerCAmelCase = [100, 87, 50, 51, 0]
with self.assertRaises(_lowerCamelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
lowerCAmelCase = [100, 87, 50, 1, 0]
lowerCAmelCase = len(_lowerCamelCase )
with self.assertRaises(_lowerCamelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_lowerCamelCase , timesteps=_lowerCamelCase )
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = self.scheduler_classes[0]
lowerCAmelCase = self.get_scheduler_config()
lowerCAmelCase = scheduler_class(**_lowerCamelCase )
lowerCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_lowerCamelCase , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=_lowerCamelCase )
| 433
|
'''simple docstring'''
import requests
_SCREAMING_SNAKE_CASE = '''YOUR API KEY'''
def _lowerCAmelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str = giphy_api_key ):
__lowercase = '''+'''.join(query.split() )
__lowercase = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
__lowercase = requests.get(lowerCamelCase_ ).json()['''data''']
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 502
| 0
|
import importlib
import inspect
import os
import re
# All paths are set with the intent that you run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__SCREAMING_SNAKE_CASE : List[Any] ='src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
__SCREAMING_SNAKE_CASE : Any =importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__SCREAMING_SNAKE_CASE : Tuple =spec.loader.load_module()
__SCREAMING_SNAKE_CASE : Optional[int] =transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__SCREAMING_SNAKE_CASE : Any =re.compile(r'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
__SCREAMING_SNAKE_CASE : Optional[int] ={
'CLIPConfigMixin',
'DecisionTransformerConfigMixin',
'EncoderDecoderConfigMixin',
'RagConfigMixin',
'SpeechEncoderDecoderConfigMixin',
'VisionEncoderDecoderConfigMixin',
'VisionTextDualEncoderConfigMixin',
}
def UpperCamelCase__ ( ):
lowercase = []
for config_class in list(CONFIG_MAPPING.values() ):
lowercase = False
# source code of `config_class`
lowercase = inspect.getsource(lowerCAmelCase__ )
lowercase = _re_checkpoint.findall(lowerCAmelCase__ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
lowercase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
lowercase = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
lowercase = True
break
lowercase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
lowercase = '''\n'''.join(sorted(lowerCAmelCase__ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
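# Hedged demo of what the checkpoint regex above extracts from a config
# docstring (re-compiled here with a readable name, since the module-level
# names are anonymized):
import re

_re_ckpt = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")
doc = "Instantiate like [bert-base-uncased](https://huggingface.co/bert-base-uncased)."
assert _re_ckpt.findall(doc) == [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]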
| 717
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[str] =argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'''
'''For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'''
'''Not needed for `l0`'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder where the pruned model will be saved. Defaults to "bertarized_<model_name_or_path>" next to the original model''',
)
__SCREAMING_SNAKE_CASE : str =parser.parse_args()
main(args)
| 72
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1
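# Illustrative check (assuming the generator above): the first few yielded
# values are the primes in order.
#   >>> gen = sieve()
#   >>> [next(gen) for _ in range(5)]
#   [2, 3, 5, 7, 11]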
def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 314
|
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
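# Note (illustrative): for strictly 0/1 inputs this is equivalent to
# `int(input_1 and input_2)`; the tuple trick simply checks that neither input is 0.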
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 110
| 0
|
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self):
        tokenizer = self.ta_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")
    def test_prepare_batch_integration(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)
    def test_max_length_integration(self):
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.ta_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=False)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 363
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
"""simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
| 363
| 1
|
'''simple docstring'''
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
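# Illustrative round trip (values computed by hand for this charset):
#   >>> base64_encode(b"Hello")
#   b'SGVsbG8='
#   >>> base64_decode("SGVsbG8=")
#   b'Hello'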
if __name__ == "__main__":
import doctest
doctest.testmod()
| 476
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converts a number of bytes into megabytes.
def b2mb(x):
    return int(x / 2**20)


# This context manager tracks the CUDA memory consumed by the wrapped block.
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 476
| 1
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """simple docstring"""

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 709
|
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
_lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = (
'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
' </s> <pad> <pad> <pad>'
)
_lowercase = rust_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
_lowercase = py_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]:
_lowercase = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
_lowercase = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
_lowercase = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
_lowercase = tokenizer([raw_input_str] ,return_tensors=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
def __UpperCAmelCase ( self : int ) -> Union[str, Any]:
_lowercase = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
_lowercase = 'To ensure a smooth flow of bank resolutions.'
_lowercase = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
_lowercase = tokenizer([raw_input_str] ,return_tensors=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
_lowercase = ['This is going to be way too long.' * 150, 'short example']
_lowercase = ['not super long but more than 5 tokens', 'tiny']
_lowercase = self._large_tokenizer(__A ,padding=__A ,truncation=__A ,return_tensors='pt' )
_lowercase = self._large_tokenizer(
text_target=__A ,max_length=5 ,padding=__A ,truncation=__A ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__A ) == 2 # input_ids, attention_mask.
@slow
def __UpperCAmelCase ( self : Dict ) -> Optional[int]:
# fmt: off
_lowercase = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A ,model_name='google/bigbird-pegasus-large-arxiv' ,revision='ba85d0851d708441f91440d509690f1ab6353415' ,)
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
_lowercase = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname )
_lowercase = (
'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
' <pad> <pad> <pad>'
)
_lowercase = rust_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
_lowercase = py_tokenizer([raw_input_str] ,return_tensors=__A ,add_special_tokens=__A ).input_ids[0]
self.assertListEqual(__A ,__A )
@require_torch
def __UpperCAmelCase ( self : List[str] ) -> Dict:
_lowercase = ['This is going to be way too long.' * 1000, 'short example']
_lowercase = ['not super long but more than 5 tokens', 'tiny']
_lowercase = self._large_tokenizer(__A ,padding=__A ,truncation=__A ,return_tensors='pt' )
_lowercase = self._large_tokenizer(
text_target=__A ,max_length=5 ,padding=__A ,truncation=__A ,return_tensors='pt' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__A ) == 2 # input_ids, attention_mask.
def __UpperCAmelCase ( self : Union[str, Any] ) -> int:
_lowercase = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
_lowercase = self._large_tokenizer(__A ).input_ids
self.assertListEqual(
__A ,[182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,)
| 535
| 0
|
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
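# Note (illustrative): the final `2.0 * image - 1.0` maps pixel values from
# [0, 1] to [-1, 1], the range diffusion UNets conventionally operate on:
# 0.0 -> -1.0, 0.5 -> 0.0, 1.0 -> 1.0.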
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """
    Latent diffusion pipeline for image super-resolution: a VQ-VAE decoder, a U-Net
    denoiser and a noise scheduler working on the concatenation of latents and the
    low-resolution input image.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
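# Hedged usage sketch (added for illustration, not part of the original file):
# drives the pipeline above end to end. The checkpoint id is an assumption about
# a compatible public VQVAE + U-Net super-resolution checkpoint.
if __name__ == "__main__":
    from io import BytesIO

    import requests

    pipe = LDMSuperResolutionPipeline.from_pretrained(
        "CompVis/ldm-super-resolution-4x-openimages"  # assumed checkpoint id
    )
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    low_res = PIL.Image.open(BytesIO(requests.get(url).content)).convert("RGB").resize((128, 128))
    upscaled = pipe(image=low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("upscaled.png")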
| 426
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the transmitted intensity I = I0 * cos^2(angle), per Malus's law."""
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
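    # Worked check (added): cos(60°)² = 0.25, so a 100-unit beam transmits
    # 25 units through an analyzer rotated 60° from the polarization axis.
    assert abs(malus_law(100.0, 60.0) - 25.0) < 1e-9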
| 426
| 1
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # sort descending and push each value onto the head, so the
        # resulting linked list reads in ascending order
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
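    # Expected output (added for illustration): merging the two tuples above
    # yields one ascending chain:
    # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10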
| 141
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias"""))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""")
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.weight""",
F"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
)
)
rename_keys.append(
(
F"""transformer.decoder.layers.{i}.cross_attn.out_proj.bias""",
F"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
)
)
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias"""))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.weight""", F"""decoder.layers.{i}.sa_qcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.weight""", F"""decoder.layers.{i}.sa_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qpos_proj.weight""", F"""decoder.layers.{i}.sa_qpos_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kpos_proj.weight""", F"""decoder.layers.{i}.sa_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.weight""", F"""decoder.layers.{i}.sa_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.weight""", F"""decoder.layers.{i}.ca_qcontent_proj.weight""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.weight""", F"""decoder.layers.{i}.ca_kcontent_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kpos_proj.weight""", F"""decoder.layers.{i}.ca_kpos_proj.weight""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.weight""", F"""decoder.layers.{i}.ca_v_proj.weight"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight""", F"""decoder.layers.{i}.ca_qpos_sine_proj.weight""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_qcontent_proj.bias""", F"""decoder.layers.{i}.sa_qcontent_proj.bias""")
)
rename_keys.append(
(F"""transformer.decoder.layers.{i}.sa_kcontent_proj.bias""", F"""decoder.layers.{i}.sa_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_qpos_proj.bias""", F"""decoder.layers.{i}.sa_qpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_kpos_proj.bias""", F"""decoder.layers.{i}.sa_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.sa_v_proj.bias""", F"""decoder.layers.{i}.sa_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qcontent_proj.bias""", F"""decoder.layers.{i}.ca_qcontent_proj.bias""")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_kcontent_proj.bias""", F"""decoder.layers.{i}.ca_kcontent_proj.bias""")
)
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_kpos_proj.bias""", F"""decoder.layers.{i}.ca_kpos_proj.bias"""))
rename_keys.append((F"""transformer.decoder.layers.{i}.ca_v_proj.bias""", F"""decoder.layers.{i}.ca_v_proj.bias"""))
rename_keys.append(
(F"""transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias""", F"""decoder.layers.{i}.ca_qpos_sine_proj.bias""")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
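# Minimal illustration (added, hypothetical key): rename_backbone_keys maps the
# torchvision-style backbone namespace onto the HF ConvEncoder one, e.g.
#   "backbone.0.body.conv1.weight" -> "backbone.conv_encoder.model.conv1.weight"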
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                # move base-model weights under the "conditional_detr.model" namespace
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
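    # Example invocation (added; the script filename is assumed):
    #   python convert_conditional_detr_checkpoint.py \
    #       --model_name conditional_detr_resnet50 \
    #       --pytorch_dump_folder_path ./conditional_detr_resnet50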
| 141
| 1
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row of the current board (possible_board) to fill with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
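    # The 4x4 case above prints its 2 solutions; counts vary non-monotonically
    # with n (n=5: 10, n=6: 4, n=7: 40, n=8: 92).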
| 261
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None  # first node of the list
        self.tail = None  # last node; its `next` points back to the head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
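    # Quick demonstration (added): the list is circular, so iteration stops as
    # soon as it wraps back around to the head node.
    demo = CircularLinkedList()
    for value in (1, 2, 3):
        demo.insert_tail(value)
    assert str(demo) == "1->2->3"
    assert demo.delete_front() == 1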
| 261
| 1
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def UpperCamelCase_ ( lowerCAmelCase__ = "isbn/0140328726" ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = olid.strip().strip("/" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("/" ) != 1:
_lowerCAmelCase : str = f"""{olid} is not a valid Open Library olid"""
raise ValueError(lowerCAmelCase__ )
return requests.get(f"""https://openlibrary.org/{new_olid}.json""" ).json()
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : List[str] = {
"title": "Title",
"publish_date": "Publish date",
"authors": "Authors",
"number_of_pages": "Number of pages:",
"first_sentence": "First sentence",
"isbn_10": "ISBN (10)",
"isbn_13": "ISBN (13)",
}
_lowerCAmelCase : Dict = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_lowerCAmelCase : Optional[Any] = [
get_openlibrary_data(author["key"] )["name"] for author in data["Authors"]
]
_lowerCAmelCase : List[str] = data["First sentence"]["value"]
for key, value in data.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_lowerCAmelCase : List[str] = ", ".join(lowerCAmelCase__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(F'''isbn/{isbn}'''))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
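# Illustrative result shape (added; exact values depend on the live Open
# Library API) for the default olid "isbn/0140328726" (Matilda):
#   {'Title': 'Matilda', 'Publish date': ..., 'Authors': 'Roald Dahl', ...}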
| 587
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
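    # Note on the pattern above (added): rather than checking every pixel, these
    # tests fingerprint a deterministic run by comparing a small 3x3 corner
    # slice of the decoded image against hard-coded reference values, with a
    # loose tolerance to absorb cross-platform numerical noise.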
| 587
| 1
|
SCREAMING_SNAKE_CASE : Dict = "Input must be a string of 8 numbers plus letter"
SCREAMING_SNAKE_CASE : List[str] = "TRWAGMYFPDXBNJZSQVHLCKE"
def UpperCamelCase_( lowerCamelCase_ ) -> bool:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_lowercase : List[Any] = F'''Expected string as input, found {type(lowerCamelCase_ ).__name__}'''
raise TypeError(lowerCamelCase_ )
_lowercase : Optional[Any] = spanish_id.replace('-' , '' ).upper()
if len(lowerCamelCase_ ) != 9:
raise ValueError(lowerCamelCase_ )
try:
_lowercase : Dict = int(spanish_id_clean[0:8] )
_lowercase : str = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowerCamelCase_ ) from ex
if letter.isdigit():
raise ValueError(lowerCamelCase_ )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
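    # Worked example (added): 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z",
    # so "12345678Z" is a structurally valid id.
    assert is_spain_national_id("12345678Z")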
| 89
|
from typing import List, Optional
from tokenizers import ByteLevelBPETokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/blenderbot_small-90M''': 512,
}
class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast (byte-level BPE) tokenizer for Blenderbot Small, backed by `tokenizers`."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
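# Sketch of the special-token layout (added; token ids hypothetical):
#   build_inputs_with_special_tokens([5, 6], [7])
#   -> [bos, 5, 6, eos, eos, 7, eos]
# i.e. the first sequence is wrapped in BOS/EOS and a pair is joined with a
# double EOS; token type ids are all zeros for both cases.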
| 84
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(PretrainedConfig):
    model_type = "markuplm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0,
        eos_token_id=2,
        max_xpath_tag_unit_embeddings=256,
        max_xpath_subs_unit_embeddings=1024,
        tag_pad_id=216,
        subs_pad_id=1001,
        xpath_unit_hidden_size=32,
        max_depth=50,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
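# Minimal usage sketch (added; run from a context where `transformers` is
# importable, e.g. `from transformers import MarkupLMConfig`):
#     config = MarkupLMConfig(max_depth=64)
#     config.max_depth               # 64
#     config.xpath_unit_hidden_size  # 32 (default)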
| 71
|
def binary_exponentiation(a, n, mod):
    """Compute (a ** n) % mod in O(log n) multiplications."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
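# Worked trace (added): binary_exponentiation(3, 5, 7) evaluates 3^5 mod 7
# as ((3^2 mod 7)^2 mod 7) * 3 mod 7 = 4 * 3 mod 7 = 5, matching 243 mod 7 = 5.
print(binary_exponentiation(3, 5, 7))  # 5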
| 71
| 1
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
@staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer, pl_module):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer, pl_module):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
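# Hedged wiring sketch (added; `MyTaskModel` is a hypothetical subclass that
# implements get_dataloader and the Lightning step methods):
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     parser = MyTaskModel.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     trainer = generic_train(MyTaskModel(args), args)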
| 30
|
"""simple docstring"""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql,
        con,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con,
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows
        return written
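# Hedged usage sketch (added; assumes an in-memory SQLite connection and the
# public `datasets` API):
#     import sqlite3
#     from datasets import Dataset
#     con = sqlite3.connect(":memory:")
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     SqlDatasetWriter(ds, "demo", con).write()           # -> rows written
#     SqlDatasetReader("SELECT * FROM demo", con).read()  # -> round-tripped Dataset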
| 359
| 0
|
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy approximation of a minimum vertex cover using a max-priority queue.

    >>> graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    >>> greedy_min_vertex_cover(graph)
    {0, 1, 2, 4}
    """
    queue = []
    # for each node and his adjacency list add them and the rank of the node to queue
    # using heapq module the queue will be filled like a Priority Queue
    # heapq works with a min priority queue, so I used -1*len(v) to build it
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    # chosen_vertices = set of chosen vertices
    chosen_vertices = set()

    # while queue isn't empty and there are still edges
    # (queue[0][0] is the rank of the node with max rank)
    while queue and queue[0][0] != 0:
        # extract vertex with max rank from queue and add it to chosen_vertices
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all arcs adjacent to argmax
        for elem in queue:
            # if v haven't adjacent node, skip
            if elem[0] == 0:
                continue
            # if argmax is reachable from elem
            # remove argmax from elem's adjacent list and update his rank
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # re-order the queue
        heapq.heapify(queue)
    return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F'''Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}''')
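# Hedged note (not part of the original file): this greedy heuristic repeatedly
# picks the currently highest-degree vertex. It scans and re-heapifies the whole
# queue on every round, so the worst case is roughly quadratic in the number of
# vertices, and the result is a valid cover but not guaranteed to be minimum.
# A quick second example on the path graph 0 - 1 - 2:
#
# >>> greedy_min_vertex_cover({0: [1], 1: [0, 2], 2: [1]})
# {1}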
| 701
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
| 83
| 0
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 101
|
import sys
import turtle
def get_mid(p1, p2):
    """Midpoint of two points."""
    return (p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2


def triangle(vertex1, vertex2, vertex3, depth):
    """Recursively draw the Sierpinski triangle down to the given depth."""
    my_pen.up()
    my_pen.goto(vertex1[0], vertex1[1])
    my_pen.down()
    my_pen.goto(vertex2[0], vertex2[1])
    my_pen.goto(vertex3[0], vertex3[1])
    my_pen.goto(vertex1[0], vertex1[1])

    if depth == 0:
        return

    triangle(vertex1, get_mid(vertex1, vertex2), get_mid(vertex1, vertex3), depth - 1)
    triangle(vertex2, get_mid(vertex1, vertex2), get_mid(vertex2, vertex3), depth - 1)
    triangle(vertex3, get_mid(vertex3, vertex2), get_mid(vertex1, vertex3), depth - 1)


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise ValueError(
            "Correct format for using this script: "
            "python fractals.py <int:depth_for_fractal>"
        )
    my_pen = turtle.Turtle()
    my_pen.ht()
    my_pen.speed(5)
    my_pen.pencolor("red")

    vertices = [(-175, -125), (0, 175), (175, -125)]  # vertices of triangle
    triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
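# Hedged note (not part of the original script): each recursion level triples
# the number of triangle outlines drawn, so depth d draws 3^0 + 3^1 + ... + 3^d
# of them; depth 5, for instance, already draws 364 outlines, so keep depths small.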
| 183
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
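# Hedged usage sketch (not part of the original file): the class and argument
# names above are reconstructions of the obfuscated source, and the checkpoint
# id below is purely illustrative.
#
# from diffusers import DDIMScheduler, UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256")  # hypothetical choice
# scheduler = DDIMScheduler.from_pretrained("google/ddpm-celebahq-256")
# pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
# images, timestep = pipe(image=pil_image, strength=0.5, num_inference_steps=50, return_dict=False)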
| 183
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 183
| 1
|
"""simple docstring"""
from __future__ import annotations
def mean(nums: list) -> float:
    """
    Find the mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 341
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))


class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def _UpperCamelCase ( self ):
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def _UpperCamelCase ( self ):
pass
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(A )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A )
def _UpperCamelCase ( self ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.attentions
UpperCAmelCase = sum(self.model_tester.depths )
self.assertEqual(len(A ) ,A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,A )
# verify the first attentions (first block, first layer)
UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
UpperCAmelCase = (self.model_tester.image_size // 32) ** 2
UpperCAmelCase = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
UpperCAmelCase = len(A )
# Check attention is always last and order is fine
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
self.assertEqual(out_len + 1 ,len(A ) )
UpperCAmelCase = outputs.attentions
self.assertEqual(len(A ) ,A )
# verify the first attentions (first block, first layer)
UpperCAmelCase = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def _UpperCamelCase ( self ):
def check_hidden_states_output(A ,A ,A ):
UpperCAmelCase = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(A ,A ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(A ) ,A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
check_hidden_states_output(A ,A ,A )
def _UpperCamelCase ( self ):
if not self.model_tester.is_training:
return
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(A ):
continue
UpperCAmelCase = model_class(A )
model.to(A )
model.train()
UpperCAmelCase = self._prepare_for_class(A ,A ,return_labels=A )
UpperCAmelCase = model(**A ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _UpperCamelCase ( self ):
pass
@slow
def _UpperCamelCase ( self ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SegformerModel.from_pretrained(A )
self.assertIsNotNone(A )
def _a ( ):
"""simple docstring"""
UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 341
| 1
|
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
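# Hedged note (not part of the original file): `sqlite_path`, `tmp_path` and
# `set_sqlalchemy_silence_uber_warning` are assumed to be pytest fixtures defined
# in the suite's conftest.py; `sqlite_path` is assumed to point at a prepared
# SQLite database containing a 4-row "dataset" table with columns col_1..col_3.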
| 307
|
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node from the list, or None if it is not linked in."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache that stores up to a given capacity of entries."""

    # class variable that maps each decorated function to its cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
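# Hedged usage sketch (not part of the original file): the decorator keeps one
# LRUCache instance per wrapped function, keyed on the first positional argument.
#
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     if num in (1, 2):
#         return 1
#     return fib(num - 1) + fib(num - 2)
#
# fib(30)
# print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)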
| 307
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = ["""MLukeTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 671
|
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph, start, goal):
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph, start, target):
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
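# Hedged note (not part of the original file): both functions are plain
# breadth-first searches, so each visits every vertex and edge at most once,
# O(V + E) overall -- except that `queue.pop(0)` is O(n) on a Python list;
# swapping in collections.deque and popleft() would make each dequeue O(1).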
| 671
| 1
|
"""simple docstring"""
def binary_recursive(decimal: int) -> str:
    """
    Take a positive integer value and return its binary equivalent.

    >>> binary_recursive(1000)
    '1111101000'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """
    Take an integer value (as a string), validate it, and return the binary
    representation with a "0b" / "-0b" prefix.

    >>> main("0")
    '0b0'
    >>> main("40")
    '0b101000'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """
    Sort a mutable collection in ascending order using binary insertion sort.

    >>> binary_insertion_sort([0, 4, 1234, 4, 1])
    [0, 1, 4, 4, 1234]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 48
| 1
|
'''simple docstring'''
def binary_insertion_sort(collection: list) -> list:
    """
    Sort a mutable collection in ascending order using binary insertion sort.

    >>> binary_insertion_sort([0, 4, 1234, 4, 1])
    [0, 1, 4, 4, 1234]
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 48
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
_SCREAMING_SNAKE_CASE , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    """Text classification pipeline using any `ModelForSequenceClassification`."""

    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__(self , **lowerCAmelCase_ ):
super().__init__(**lowerCAmelCase_ )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
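# Hedged usage sketch (not part of the original module): typical use goes
# through the `pipeline` factory; the model id below is illustrative.
#
# from transformers import pipeline
# classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
# classifier("I love this movie!")              # [{'label': 'POSITIVE', 'score': ...}]
# classifier("I love this movie!", top_k=None)  # scores for every label, best first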
| 180
| 0
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    protein = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(protein))
    return out
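# Hedged note (not part of the original file): given `protein["aatype"]` of shape
# [num_res], make_atom14_masks adds the dense<->sparse index maps and masks, e.g.
# protein["residx_atom14_to_atom37"] with shape [num_res, 14] and
# protein["atom37_atom_exists"] with shape [num_res, 37].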
| 714
|
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( A , A , unittest.TestCase ):
"""simple docstring"""
_lowercase : Tuple = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_lowercase : Optional[Any] = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowercase : Union[str, Any] = False
_lowercase : int = True
_lowercase : List[str] = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : int = SqueezeBertModelTester(self )
_lowerCAmelCase : Tuple = ConfigTester(self , config_class=A_ , dim=3_7 )
def __magic_name__ ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*A_ )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*A_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*A_ )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*A_ )
@slow
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : int = SqueezeBertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" )
_lowerCAmelCase : Optional[int] = torch.tensor([[1, 2_9_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 1_3, 1_5_8_8, 2]] )
_lowerCAmelCase : List[str] = model(A_ )[0]
_lowerCAmelCase : Any = torch.Size((1, 3) )
self.assertEqual(output.shape , A_ )
_lowerCAmelCase : Any = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(A_ , A_ , atol=1E-4 ) )
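if __name__ == "__main__":
    # Standalone version of the integration check above (a minimal sketch; it
    # assumes the public "squeezebert/squeezebert-mnli" checkpoint is reachable
    # and that torch/transformers are already imported at the top of this file):
    model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
    input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
    print(model(input_ids)[0].shape)  # expected: torch.Size([1, 3])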
| 503
| 0
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()


@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n    also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )

            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
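if __name__ == "__main__":
    # Hedged sketch of the decision rule the `entropy` values above feed into:
    # exit at a highway head once its softmax entropy drops below a tuned
    # threshold (the 0.5 here is an illustrative assumption, not a trained value).
    import torch

    def should_exit(logits, threshold=0.5):
        probs = torch.softmax(logits, dim=-1)
        ent = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)
        return bool((ent < threshold).all())

    print(should_exit(torch.tensor([[8.0, 0.1, 0.1]])))  # confident prediction -> True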
| 13
|
import torch
def lowerCAmelCase_ ( ) -> int:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
    return num_gpus
if __name__ == "__main__":
    lowerCAmelCase_()
| 271
| 0
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
_A = 'pytorch_model.bin'
_A = 'pytorch_model.bin.index.json'
_A = 'adapter_config.json'
_A = 'adapter_model.bin'
_A = 'adapter_model.safetensors'
_A = 'tf_model.h5'
_A = 'tf_model.h5.index.json'
_A = 'model.ckpt'
_A = 'flax_model.msgpack'
_A = 'flax_model.msgpack.index.json'
_A = 'model.safetensors'
_A = 'model.safetensors.index.json'
_A = 'config.json'
_A = 'preprocessor_config.json'
_A = FEATURE_EXTRACTOR_NAME
_A = 'generation_config.json'
_A = 'modelcard.json'
_A = '▁'
_A = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
_A = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
_A = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
_A = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"""This example requires a minimum version of {min_version},"""
        error_message += f""" but the version found is {__version__}.\n"""
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers." )
| 538
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']}
if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 538
| 1
|
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
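# Side note (a small sanity sketch): channels_last changes only the memory
# layout, never the tensor values, so the optimization above is lossless.
x = torch.randn(1, 3, 8, 8)
assert torch.equal(x, x.to(memory_format=torch.channels_last))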
| 37
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
_UpperCamelCase : Any = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_directory: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_directory, filename)
    edges: dict[EdgeT, int] = {}

    with open(filepath) as f:
        data: list[str] = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    for edge_b in range(1, len(adjacency_matrix)):
        for edge_a in range(edge_b):
            if adjacency_matrix[edge_b][edge_a] != "-":
                edges[(edge_a, edge_b)] = int(adjacency_matrix[edge_b][edge_a])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
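    # Minimal usage sketch for the Graph/prims_algorithm pair above (the toy
    # weights are assumptions, not the Project Euler network data):
    g = Graph({0, 1, 2}, {(0, 1): 5, (1, 2): 3, (0, 2): 10})
    print(sum(g.edges.values()) - sum(g.prims_algorithm().edges.values()))  # weight saved: 10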
| 541
| 0
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"""WER: {wer_result}\nCER: {cer_result}"""
    print(result_str)

    with open(f"""{dataset_id}_eval_results.txt""", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"""log_{dataset_id}_predictions.txt"""
        target_file = f"""log_{dataset_id}_targets.txt"""

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"""{i}""" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"""{i}""" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
    )
    parser.add_argument(
        '--dataset',
        type=str,
        required=True,
        help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
    )
    parser.add_argument(
        '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
    )
    parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
    parser.add_argument(
        '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
    )
    parser.add_argument(
        '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
    )
    parser.add_argument(
        '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
    )
    parser.add_argument(
        '--device',
        type=int,
        default=None,
        help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
    )
    args = parser.parse_args()

    main(args)
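    # Quick check of normalize_text above (a sketch run after evaluation):
    # punctuation is stripped, case is folded, and whitespace runs collapse.
    print(normalize_text("Hello, WORLD!\n\nTest"))  # -> "hello world test"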
| 402
|
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a root can only be bracketed when f(a) and f(b) differ in sign
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")
    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
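    # A generalized variant (a sketch, not part of the original): pass the
    # function in rather than hard-coding `equation`.
    def bisection_on(f, a, b, tol=0.01):
        if f(a) * f(b) >= 0:
            raise ValueError("Wrong space!")
        while (b - a) >= tol:
            c = (a + b) / 2
            if f(c) == 0.0 or f(c) * f(a) < 0:
                b = c
            else:
                a = c
        return (a + b) / 2

    print(bisection_on(lambda x: x * x - 2, 0, 2))  # ~1.414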
| 402
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]

if TYPE_CHECKING:
    from .configuration_trajectory_transformer import (
        TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TrajectoryTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trajectory_transformer import (
            TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TrajectoryTransformerModel,
            TrajectoryTransformerPreTrainedModel,
            load_tf_weights_in_trajectory_transformer,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : List[Any] = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
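if __name__ == "__main__":
    # Quick look at the dynamic axes the ONNX exporter consumes (a sketch; it
    # assumes the default task, i.e. not "multiple-choice"):
    print(CamembertOnnxConfig(CamembertConfig()).inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
    #              ('attention_mask', {0: 'batch', 1: 'sequence'})])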
| 49
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Union[str, Any] ={"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] =["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any =["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any =[
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A_ : Optional[int] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
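if __name__ == "__main__":
    # The same laziness with only the stdlib (a sketch, not how transformers
    # implements it): defer a heavy import until an attribute is first touched.
    import importlib

    class _Lazy:
        def __init__(self, name):
            self._name, self._module = name, None

        def __getattr__(self, attr):
            if self._module is None:
                self._module = importlib.import_module(self._name)
            return getattr(self._module, attr)

    lazy_json = _Lazy("json")               # nothing imported yet
    print(lazy_json.dumps({"lazy": True}))  # the real import happens here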
| 720
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            'Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
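    # Sanity arithmetic for the "New Code" above (a sketch): the effective
    # batch size equals per-device batch * gradient_accumulation_steps * num_processes.
    print(16 * 4 * 2)  # e.g. batch 16, 4 accumulation steps, 2 GPUs -> 128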
| 606
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']

OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dialogpt_path', default='.', type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = F"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 338
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = 'path-to-your-trained-model'
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')

prompt = 'A photo of sks dog in a bucket'
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save('dog-bucket.png')
| 338
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description='Train a masked language model on TPU.')
    parser.add_argument(
        '--pretrained_model_config', type=str, default='roberta-base', help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!', )
    parser.add_argument(
        '--tokenizer', type=str, default='unigram-tokenizer-wikitext', help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.', )
    parser.add_argument(
        '--per_replica_batch_size', type=int, default=8, help='Batch size per TPU core.', )
    parser.add_argument(
        '--no_tpu', action='store_true', help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.', )
    parser.add_argument(
        '--tpu_name', type=str, help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.', default='local', )
    parser.add_argument(
        '--tpu_zone', type=str, help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.', )
    parser.add_argument(
        '--gcp_project', type=str, help='Google cloud project name. Only used for non-Colab TPU nodes.')
    parser.add_argument(
        '--bfloat16', action='store_true', help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.', )
    parser.add_argument(
        '--train_dataset', type=str, help='Path to training dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.', )
    parser.add_argument(
        '--shuffle_buffer_size', type=int, default=2**18, help='Size of the shuffle buffer (in samples)', )
    parser.add_argument(
        '--eval_dataset', type=str, help='Path to evaluation dataset to load. If the path begins with `gs://`'
        ' then the dataset will be loaded from a Google Cloud Storage bucket.', )
    parser.add_argument(
        '--num_epochs', type=int, default=1, help='Number of epochs to train for.', )
    parser.add_argument(
        '--learning_rate', type=float, default=1e-4, help='Learning rate to use for training.', )
    parser.add_argument(
        '--weight_decay_rate', type=float, default=1e-3, help='Weight decay rate to use for training.', )
    parser.add_argument(
        '--max_length', type=int, default=512, help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py', )
    parser.add_argument(
        '--mlm_probability', type=float, default=0.15, help='Fraction of tokens to mask during training.', )
    parser.add_argument('--output_dir', type=str, required=True, help='Path to save model checkpoints to.')
    parser.add_argument('--hub_model_id', type=str, help='Model ID to upload to on the Hugging Face Hub.')
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
            '--gcp_project. When running on a TPU VM, use --tpu_name local.')

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu


def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(R'-\d+-(\d+)\.tfrecord', filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device='/gpu:0')

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy('mixed_bfloat16')

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, '*.tfrecord'))
    if not training_records:
        raise ValueError(f"""No .tfrecord files found in {args.train_dataset}.""")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, '*.tfrecord'))
    if not eval_records:
        raise ValueError(f"""No .tfrecord files found in {args.eval_dataset}.""")

    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=['accuracy'])

    def decode_fn(example):
        features = {
            'input_ids': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors='tf')

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch['attention_mask'], tf.bool)
            | (batch['input_ids'] == tokenizer.cls_token_id)
            | (batch['input_ids'] == tokenizer.sep_token_id)
        )
        batch['input_ids'], batch['labels'] = data_collator.tf_mask_tokens(
            batch['input_ids'], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))

    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
    main(args)
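    # Quick self-check of the shard-name parsing in count_samples (a sketch;
    # the filenames are made up to match "<prefix>-<shard>-<num_samples>.tfrecord"):
    assert count_samples(["wiki-00001-52250.tfrecord", "wiki-00002-48750.tfrecord"]) == 101000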
| 220
|
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))

    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image

    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert('RGB')
    pil_img.save('result_dilation.png')
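    # Tiny worked example (a sketch): dilating a single bright pixel with the
    # cross-shaped structuring element above grows it into a plus sign.
    tiny = np.zeros((3, 3))
    tiny[1, 1] = 1
    print(dilation(tiny, structuring_element))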
| 220
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer'] = ['ReformerTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_reformer_fast'] = ['ReformerTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_reformer'] = [
        'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ReformerAttention',
        'ReformerForMaskedLM',
        'ReformerForQuestionAnswering',
        'ReformerForSequenceClassification',
        'ReformerLayer',
        'ReformerModel',
        'ReformerModelWithLMHead',
        'ReformerPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 84
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _A ( __lowercase ):
lowercase__: Any = ['''image_processor''', '''tokenizer''']
lowercase__: Any = '''CLIPImageProcessor'''
lowercase__: Optional[Any] = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __magic_name__ : Dict=None , __magic_name__ : Dict=None , **__magic_name__ : Union[str, Any] ) -> Any:
"""simple docstring"""
__snake_case : Optional[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
__snake_case : List[Any] = kwargs.pop("""feature_extractor""" )
__snake_case : List[str] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
def __call__( self : int , __magic_name__ : List[str]=None , __magic_name__ : Tuple=None , __magic_name__ : Any=None , **__magic_name__ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__snake_case : int = self.tokenizer(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if images is not None:
__snake_case : str = self.image_processor(__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
if text is not None and images is not None:
__snake_case : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowercase__ ( self : Optional[int] , *__magic_name__ : List[Any] , **__magic_name__ : Any ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowercase__ ( self : List[str] , *__magic_name__ : Tuple , **__magic_name__ : List[Any] ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Dict = self.tokenizer.model_input_names
__snake_case : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowercase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
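# Hedged usage sketch for the processor above. The checkpoint name and the image
# file are illustrative assumptions, not taken from this file.
from PIL import Image
from transformers import CLIPProcessor

clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
example_image = Image.open("cat.png")  # hypothetical local file
inputs = clip_processor(
    text=["a photo of a cat"], images=example_image, return_tensors="pt", padding=True
)
# `inputs` combines the tokenizer output (input_ids, attention_mask) with pixel_values.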
| 26
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = (DPMSolverSinglestepScheduler,)
_lowerCamelCase = (("""num_inference_steps""", 25),)
def snake_case_ ( self , **__A ):
__a = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**__A )
return config
def snake_case_ ( self , __A=0 , **__A ):
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __A )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__A )
__a = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals
__a = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
__a = scheduler_class.from_pretrained(__A )
new_scheduler.set_timesteps(__A )
# copy over dummy past residuals
__a = dummy_past_residuals[: new_scheduler.config.solver_order]
__a , __a = sample, sample
for t in range(__A , time_step + scheduler.config.solver_order + 1 ):
__a = scheduler.step(__A , __A , __A , **__A ).prev_sample
__a = new_scheduler.step(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case_ ( self ):
pass
def snake_case_ ( self , __A=0 , **__A ):
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __A )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
__a = scheduler_class.from_pretrained(__A )
# copy over dummy past residuals
new_scheduler.set_timesteps(__A )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[: new_scheduler.config.solver_order]
__a = scheduler.step(__A , __A , __A , **__A ).prev_sample
__a = new_scheduler.step(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case_ ( self , __A=None , **__A ):
if scheduler is None:
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__A )
__a = scheduler_class(**__A )
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__A )
__a = scheduler_class(**__A )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__A )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__A , __A )
__a = scheduler.step(__A , __A , __A ).prev_sample
return sample
def snake_case_ ( self ):
__a = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__a = 50
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__A )
        # make sure that the first t is odd
for i, t in enumerate(scheduler.timesteps[3:] ):
__a = model(__A , __A )
__a = scheduler.step(__A , __A , __A ).prev_sample
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def snake_case_ ( self ):
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def snake_case_ ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__a = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__a = self.full_loop(scheduler=__A )
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
__a = DEISMultistepScheduler.from_config(scheduler.config )
__a = DPMSolverMultistepScheduler.from_config(scheduler.config )
__a = UniPCMultistepScheduler.from_config(scheduler.config )
__a = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__a = self.full_loop(scheduler=__A )
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def snake_case_ ( self ):
self.check_over_configs(thresholding=__A )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=__A , prediction_type=__A , sample_max_value=__A , algorithm_type="""dpmsolver++""" , solver_order=__A , solver_type=__A , )
def snake_case_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def snake_case_ ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=__A , solver_type=__A , prediction_type=__A , algorithm_type=__A , )
__a = self.full_loop(
solver_order=__A , solver_type=__A , prediction_type=__A , algorithm_type=__A , )
assert not torch.isnan(__A ).any(), "Samples have nan numbers"
def snake_case_ ( self ):
self.check_over_configs(lower_order_final=__A )
self.check_over_configs(lower_order_final=__A )
def snake_case_ ( self ):
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def snake_case_ ( self ):
self.check_over_configs(variance_type=__A )
self.check_over_configs(variance_type="""learned_range""" )
def snake_case_ ( self ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=__A , time_step=0 )
def snake_case_ ( self ):
__a = self.full_loop()
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def snake_case_ ( self ):
__a = self.full_loop(use_karras_sigmas=__A )
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def snake_case_ ( self ):
__a = self.full_loop(prediction_type="""v_prediction""" )
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def snake_case_ ( self ):
__a = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=__A )
__a = torch.mean(torch.abs(__A ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def snake_case_ ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(thresholding=__A , dynamic_thresholding_ratio=0 )
__a = scheduler_class(**__A )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter.half()
scheduler.set_timesteps(__A )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__A , __A )
__a = scheduler.step(__A , __A , __A ).prev_sample
assert sample.dtype == torch.floataa
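# Hedged sketch of the denoising loop the tests above exercise; the random
# model_output stands in for a real noise-prediction network and is an assumption.
import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(25)
sample = torch.randn(1, 3, 32, 32)
for t in scheduler.timesteps:
    model_output = torch.randn_like(sample)  # placeholder for unet(sample, t).sample
    sample = scheduler.step(model_output, t, sample).prev_sample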
| 209
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = 42
class __UpperCAmelCase ( __A , __A ):
"""simple docstring"""
@register_to_config
def __init__( self , __A = 3 , __A = 3 , __A = ("DownEncoderBlock2D",) , __A = ("UpDecoderBlock2D",) , __A = (64,) , __A = 1 , __A = "silu" , __A = 3 , __A = 32 , __A = 256 , __A = 32 , __A = None , __A = 0.18215 , __A = "group" , ):
super().__init__()
# pass init params to Encoder
__a = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
__a = vq_embed_dim if vq_embed_dim is not None else latent_channels
__a = nn.Convad(__A , __A , 1 )
__a = VectorQuantizer(__A , __A , beta=0.25 , remap=__A , sane_index_shape=__A )
__a = nn.Convad(__A , __A , 1 )
# pass init params to Decoder
__a = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def snake_case_ ( self , __A , __A = True ):
__a = self.encoder(__A )
__a = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def snake_case_ ( self , __A , __A = False , __A = True ):
# also go through quantization layer
if not force_not_quantize:
__a , __a , __a = self.quantize(__A )
else:
__a = h
__a = self.post_quant_conv(__A )
__a = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def snake_case_ ( self , __A , __A = True ):
__a = sample
__a = self.encode(__A ).latents
__a = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
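# Hedged round-trip sketch for the VQ model above; the tiny input shape is an
# illustrative assumption.
import torch
from diffusers import VQModel

vq = VQModel()  # default config: 3-channel images, one down/up block
with torch.no_grad():
    reconstruction = vq(torch.randn(1, 3, 32, 32)).sample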
| 209
| 1
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def _A ( __magic_name__ ):
lowercase__ = [
"decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(__magic_name__ , __magic_name__ )
def _A ( __magic_name__ ):
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(__magic_name__ , __magic_name__ , bias=__magic_name__ )
lowercase__ = emb.weight.data
return lin_layer
def _A ( __magic_name__ ):
lowercase__ = torch.load(__magic_name__ , map_location="cpu" )
lowercase__ = Namespace(**checkpoint["cfg"]["model"] )
lowercase__ = checkpoint["model"]
remove_ignore_keys_(__magic_name__ )
lowercase__ = state_dict["decoder.embed_tokens.weight"].shape[0]
lowercase__ = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
lowercase__ = XGLMConfig(
vocab_size=__magic_name__ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowercase__ = XGLMForCausalLM(__magic_name__ )
lowercase__ = model.load_state_dict(__magic_name__ , strict=__magic_name__ )
print(__magic_name__ )
lowercase__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_snake_case = parser.parse_args()
_snake_case = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
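# Hedged invocation example for the conversion script above; the script name and
# paths are placeholders, not from this file.
#   python convert_xglm_checkpoint.py /path/to/model.pt /path/to/output_dir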
| 655
|
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = """▁"""
_snake_case = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
_snake_case = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
_snake_case = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
_snake_case = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
_snake_case = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = ["input_ids"]
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_INIT_CONFIGURATION
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = RESOURCE_FILES_NAMES
def __init__( self :Union[str, Any] , _lowercase :Union[str, Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=False , _lowercase :Dict="utf8" , _lowercase :Optional[Any]="[UNK]" , _lowercase :Optional[int]="[SEP]" , _lowercase :List[str]="[PAD]" , _lowercase :Dict="[CLS]" , _lowercase :Optional[Any]="[MASK]" , _lowercase :Optional[Dict[str, Any]] = None , **_lowercase :Tuple , ):
'''simple docstring'''
lowercase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , vocab_file=_lowercase , encoding=_lowercase , sp_model_kwargs=self.sp_model_kwargs , **_lowercase , )
lowercase__ = do_lower_case
lowercase__ = sentencepiece_model_ckpt
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowercase )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
lowercase__ = self.load_vocab(filepath=_lowercase )
else:
lowercase__ = {self.sp_model.id_to_piece(_lowercase ): id for id in range(self.sp_model.get_piece_size() )}
lowercase__ = {v: k for k, v in self.vocab.items()}
def UpperCAmelCase ( self :Any , _lowercase :Dict ):
'''simple docstring'''
if text is None:
return None
lowercase__ = self.tokenize(_lowercase )
lowercase__ , lowercase__ = "", []
for i, ch in enumerate(_lowercase ):
if ch in self.SP_CHAR_MAPPING:
lowercase__ = self.SP_CHAR_MAPPING.get(_lowercase )
else:
lowercase__ = unicodedata.normalize("NFKC" , _lowercase )
if self.is_whitespace(_lowercase ):
continue
normalized_text += ch
char_mapping.extend([i] * len(_lowercase ) )
lowercase__ , lowercase__ , lowercase__ = normalized_text, [], 0
if self.do_lower_case:
lowercase__ = text.lower()
for token in split_tokens:
if token[:1] == "▁":
lowercase__ = token[1:]
lowercase__ = text[offset:].index(_lowercase ) + offset
lowercase__ = start + len(_lowercase )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
lowercase__ = end
return token_mapping
@property
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
return len(self.vocab )
def UpperCAmelCase ( self :Tuple ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self :Any ):
'''simple docstring'''
lowercase__ = self.__dict__.copy()
lowercase__ = None
return state
def __setstate__( self :Optional[Any] , _lowercase :Dict ):
'''simple docstring'''
lowercase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowercase__ = {}
lowercase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def UpperCAmelCase ( self :List[str] , _lowercase :Optional[Any] ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(_lowercase , _lowercase ) for c in text) )
def UpperCAmelCase ( self :str , _lowercase :int , _lowercase :Union[str, Any]=False , _lowercase :Optional[int]=64 , _lowercase :Any=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
lowercase__ = True
if self.sp_model_kwargs.get("alpha" ) is not None:
lowercase__ = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
lowercase__ = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
lowercase__ = self.sp_model.EncodeAsPieces(_lowercase )
else:
lowercase__ = self.sp_model.SampleEncodeAsPieces(_lowercase , _lowercase , _lowercase )
lowercase__ = []
for pi, piece in enumerate(_lowercase ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(_lowercase ) and pi != 0:
new_pieces.append(_lowercase )
continue
else:
continue
lowercase__ = 0
for i, chunk in enumerate(_lowercase ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(_lowercase ) or self.is_punct(_lowercase ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(_lowercase )
lowercase__ = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
lowercase__ = i
if len(_lowercase ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def UpperCAmelCase ( self :Tuple , _lowercase :Tuple ):
'''simple docstring'''
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Any , _lowercase :str ):
'''simple docstring'''
lowercase__ = self.convert_ids_to_tokens(_lowercase )
lowercase__ = "".join(_lowercase ).replace(_lowercase , " " ).strip()
return out_string
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Optional[int] ):
'''simple docstring'''
return self.vocab.get(_lowercase , self.vocab.get(self.unk_token ) )
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
return self.reverse_vocab.get(_lowercase , self.unk_token )
def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :Tuple=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def UpperCAmelCase ( self :Dict , _lowercase :int , _lowercase :Union[str, Any]=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def UpperCAmelCase ( self :Optional[Any] , _lowercase :Union[str, Any] , _lowercase :Dict=None , _lowercase :Optional[Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(_lowercase )) + [1, 1] + ([0] * len(_lowercase )) + [1]
return [1] + ([0] * len(_lowercase )) + [1]
def UpperCAmelCase ( self :int , _lowercase :List[int] , _lowercase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
# [CLS] X [SEP]
return (len(_lowercase ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(_lowercase ) + 1) + [1] * (len(_lowercase ) + 3)
def UpperCAmelCase ( self :str , _lowercase :Optional[int] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def UpperCAmelCase ( self :Tuple , _lowercase :List[str] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Dict ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def UpperCAmelCase ( self :List[str] , _lowercase :List[str] ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(_lowercase ) == 1:
lowercase__ = unicodedata.category(_lowercase )
if cat == "Zs":
return True
return False
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = {}
with io.open(_lowercase , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(_lowercase ):
lowercase__ = line.rstrip("\n" )
lowercase__ = int(_lowercase )
return token_to_idx
def UpperCAmelCase ( self :List[str] , _lowercase :str , _lowercase :Optional[str] = None ):
'''simple docstring'''
lowercase__ = 0
if os.path.isdir(_lowercase ):
lowercase__ = os.path.join(
_lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
lowercase__ = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(_lowercase , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
lowercase__ = token_index
writer.write(token + "\n" )
index += 1
lowercase__ = os.path.join(_lowercase , "sentencepiece.bpe.model" )
with open(_lowercase , "wb" ) as fi:
lowercase__ = self.sp_model.serialized_model_proto()
fi.write(_lowercase )
return (vocab_file,)
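# Hedged usage sketch, loading the upstream class this file mirrors from the
# checkpoint named in the vocab map above; treating that repo as usable is an
# assumption.
from transformers import ErnieMTokenizer

ernie_tokenizer = ErnieMTokenizer.from_pretrained("susnato/ernie-m-base_pytorch")
token_ids = ernie_tokenizer("A sample sentence.")["input_ids"]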
| 655
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase_ : Optional[int] = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : Optional[int] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ : List[Any] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCamelCase_ : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 704
|
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
lowerCamelCase_ : Any = datasets.utils.logging.get_logger(__name__)
@dataclass
class a__ ( datasets.BuilderConfig ):
A__ : Optional[datasets.Features] = None
A__ : str = "utf-8"
A__ : Optional[str] = None
A__ : Optional[str] = None
A__ : bool = True # deprecated
A__ : Optional[int] = None # deprecated
A__ : int = 10 << 20 # 10MB
A__ : Optional[bool] = None
class a__ ( datasets.ArrowBasedBuilder ):
A__ : str = JsonConfig
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
if self.config.block_size is not None:
logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead' )
__a = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.' )
if self.config.newlines_in_values is not None:
raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported' )
return datasets.DatasetInfo(features=self.config.features )
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Tuple:
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
__a = dl_manager.download_and_extract(self.config.data_files )
if isinstance(UpperCAmelCase , (str, list, tuple) ):
__a = data_files
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__a = [files]
__a = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
__a = []
for split_name, files in data_files.items():
if isinstance(UpperCAmelCase , UpperCAmelCase ):
__a = [files]
__a = [dl_manager.iter_files(UpperCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=UpperCAmelCase , gen_kwargs={'files': files} ) )
return splits
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
__a = self.config.features.arrow_schema.field(UpperCAmelCase ).type
__a = pa_table.append_column(UpperCAmelCase , pa.array([None] * len(UpperCAmelCase ) , type=UpperCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
__a = table_cast(UpperCAmelCase , self.config.features.arrow_schema )
return pa_table
def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase ) -> Dict:
for file_idx, file in enumerate(itertools.chain.from_iterable(UpperCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__a = json.load(UpperCAmelCase )
# We keep only the field we are interested in
__a = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(UpperCAmelCase , (list, tuple) ):
__a = set().union(*[row.keys() for row in dataset] )
__a = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys}
else:
__a = dataset
__a = pa.Table.from_pydict(UpperCAmelCase )
yield file_idx, self._cast_table(UpperCAmelCase )
# If the file has one json object per line
else:
with open(UpperCAmelCase , 'rb' ) as f:
__a = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
__a = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
__a = (
self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
)
while True:
__a = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(UpperCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
__a = batch.decode(self.config.encoding , errors=UpperCAmelCase ).encode('utf-8' )
try:
while True:
try:
__a = paj.read_json(
io.BytesIO(UpperCAmelCase ) , read_options=paj.ReadOptions(block_size=UpperCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(UpperCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(UpperCAmelCase )
or block_size > len(UpperCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
f'''Batch of {len(UpperCAmelCase )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
UpperCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
__a = json.load(UpperCAmelCase )
except json.JSONDecodeError:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(UpperCAmelCase , UpperCAmelCase ): # list is the only sequence type supported in JSON
try:
__a = set().union(*[row.keys() for row in dataset] )
__a = {col: [row.get(UpperCAmelCase ) for row in dataset] for col in keys}
__a = pa.Table.from_pydict(UpperCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
yield file_idx, self._cast_table(UpperCAmelCase )
break
else:
logger.error(f'''Failed to read file \'{file}\' with error {type(UpperCAmelCase )}: {e}''' )
raise ValueError(
f'''Not able to read records in the JSON file at {file}. '''
f'''You should probably indicate the field of the JSON file containing your records. '''
f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(UpperCAmelCase )
batch_idx += 1
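# Hedged sketch of how this builder is usually reached through the public
# `datasets` API; the file names are placeholders.
from datasets import load_dataset

ds = load_dataset("json", data_files="data.jsonl")  # one JSON object per line
nested = load_dataset("json", data_files="data.json", field="records")  # records under a field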
| 246
| 0
|
__A = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__A = [{"type": "code", "content": INSTALL_CONTENT}]
__A = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 68
|
def circle_sort(collection: list) -> list:
    """Sort a list in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        """Swap out-of-order mirrored pairs in collection[low:high + 1].

        Returns True if any swap happened in this pass.
        """
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        # odd-sized range: compare the middle element with its right neighbour
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)
        return swapped or left_swap or right_swap

    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)
    return collection
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(circle_sort(unsorted))
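# Quick deterministic check of the implementation above (an addition, not part
# of the original script).
assert circle_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
assert circle_sort([]) == []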
| 203
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class _lowerCAmelCase ( __a ):
_lowercase ='''efficientformer'''
def __init__( self , _UpperCamelCase = [3, 2, 6, 4] , _UpperCamelCase = [48, 96, 224, 448] , _UpperCamelCase = [True, True, True, True] , _UpperCamelCase = 448 , _UpperCamelCase = 32 , _UpperCamelCase = 4 , _UpperCamelCase = 7 , _UpperCamelCase = 5 , _UpperCamelCase = 8 , _UpperCamelCase = 4 , _UpperCamelCase = 0.0 , _UpperCamelCase = 16 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 3 , _UpperCamelCase = 2 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1 , _UpperCamelCase = True , _UpperCamelCase = True , _UpperCamelCase = 1e-5 , _UpperCamelCase = "gelu" , _UpperCamelCase = 0.02 , _UpperCamelCase = 1e-1_2 , _UpperCamelCase = 224 , _UpperCamelCase = 1e-0_5 , **_UpperCamelCase , ) -> Optional[int]:
super().__init__(**__lowerCAmelCase )
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = layer_norm_eps
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = depths
lowerCAmelCase_ = mlp_expansion_ratio
lowerCAmelCase_ = downsamples
lowerCAmelCase_ = dim
lowerCAmelCase_ = key_dim
lowerCAmelCase_ = attention_ratio
lowerCAmelCase_ = resolution
lowerCAmelCase_ = pool_size
lowerCAmelCase_ = downsample_patch_size
lowerCAmelCase_ = downsample_stride
lowerCAmelCase_ = downsample_pad
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = num_metaad_blocks
lowerCAmelCase_ = distillation
lowerCAmelCase_ = use_layer_scale
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = image_size
lowerCAmelCase_ = batch_norm_eps
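# Hedged construction example for the config above, using the upstream class name;
# the kwargs mirror the defaults in the signature.
from transformers import EfficientFormerConfig

efficientformer_cfg = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])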
| 707
|
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count simple paths from (row, col) to the bottom-right cell of `grid`.

    Cells containing 1 are blocked; `visit` holds the cells already on the
    current path so no cell is used twice.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    import doctest
    doctest.testmod()
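# Small worked example for the path counter above: an open 2x2 grid has exactly
# two simple paths from the top-left to the bottom-right cell.
example_grid = [[0, 0], [0, 0]]
assert depth_first_search(example_grid, 0, 0, set()) == 2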
| 279
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCAmelCase: Any = logging.get_logger(__name__)
lowerCAmelCase: Tuple = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class a__( lowerCamelCase__ ):
lowercase__ = """deberta-v2"""
def __init__( self : str , __snake_case : List[str]=12_81_00 , __snake_case : str=15_36 , __snake_case : Optional[int]=24 , __snake_case : Union[str, Any]=24 , __snake_case : Optional[int]=61_44 , __snake_case : Tuple="gelu" , __snake_case : str=0.1 , __snake_case : Optional[Any]=0.1 , __snake_case : Dict=5_12 , __snake_case : Optional[int]=0 , __snake_case : List[Any]=0.02 , __snake_case : str=1e-7 , __snake_case : Optional[int]=False , __snake_case : Optional[int]=-1 , __snake_case : int=0 , __snake_case : List[Any]=True , __snake_case : Tuple=None , __snake_case : Optional[int]=0 , __snake_case : Optional[int]="gelu" , **__snake_case : int , ):
super().__init__(**__lowerCAmelCase )
a : Any = hidden_size
a : Optional[Any] = num_hidden_layers
a : List[str] = num_attention_heads
a : Optional[int] = intermediate_size
a : List[str] = hidden_act
a : Dict = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Optional[int] = type_vocab_size
a : Dict = initializer_range
a : Any = relative_attention
a : str = max_relative_positions
a : Optional[Any] = pad_token_id
a : Dict = position_biased_input
# Backwards compatibility
if type(__lowerCAmelCase ) == str:
a : int = [x.strip() for x in pos_att_type.lower().split('|' )]
a : Tuple = pos_att_type
a : Optional[int] = vocab_size
a : Optional[int] = layer_norm_eps
a : Optional[Any] = kwargs.get('pooler_hidden_size' , __lowerCAmelCase )
a : List[str] = pooler_dropout
a : List[str] = pooler_hidden_act
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : Optional[Any] ):
if self.task == "multiple-choice":
a : Tuple = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a : Optional[Any] = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowercase_ ( self : int ):
return 12
def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Any = -1 , __snake_case : Optional[int] = -1 , __snake_case : Optional[int] = -1 , __snake_case : List[str] = False , __snake_case : List[Any] = None , __snake_case : Any = 3 , __snake_case : Optional[int] = 40 , __snake_case : Optional[int] = 40 , __snake_case : Any = None , ):
a : int = super().generate_dummy_inputs(preprocessor=__lowerCAmelCase , framework=__lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
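# Hedged construction example for the config above, using the upstream class name.
from transformers import DebertaV2Config

deberta_cfg = DebertaV2Config(hidden_size=768, num_hidden_layers=12, num_attention_heads=12)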
| 526
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCamelCase ( self ):
'''simple docstring'''
lowerCamelCase__ = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
lowerCamelCase__ = {
'''input_ids''': tf.convert_to_tensor([[0, 2_6_4_6, 1_0_2_6_9, 8_3, 9_9_9_4_2, 2]] , dtype=tf.intaa ), # "My dog is cute"
'''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.intaa ),
}
lowerCamelCase__ = model(__lowerCAmelCase )['''last_hidden_state''']
lowerCamelCase__ = tf.TensorShape((1, 6, 7_6_8) )
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice.
lowerCamelCase__ = tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 481
| 0
|
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase (lowercase_: List[Any] , lowercase_: List[str] , lowercase_: Union[str, Any]=None , **lowercase_: List[Any] ) -> List[Any]:
A__ : List[Any] = [x.strip() for x in open(lowercase_ ).readlines()]
A__ : Optional[Any] = [x.strip() for x in open(lowercase_ ).readlines()][: len(lowercase_ )]
A__ : List[Any] = calculate_rouge(lowercase_ , lowercase_ , **lowercase_ )
if save_path is not None:
save_json(lowercase_ , lowercase_ , indent=lowercase_ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
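# Hedged command-line example for the fire entry point above; the script name
# and file names are placeholders.
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json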
| 717
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
A_ : Any = logging.getLogger(__name__)
def UpperCamelCase (lowercase_: Optional[Any]=2 , lowercase_: Union[str, Any]=3 , lowercase_: int=16 , lowercase_: int = 10 , lowercase_: int = 2 ) -> int:
def get_dataset(lowercase_: Optional[int] ):
A__ : Optional[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase_ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
A__ : Dict = get_dataset(lowercase_ )
A__ : Any = get_dataset(lowercase_ )
A__ : Dict = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
A__ : Optional[Any] = DataLoader(lowercase_ , shuffle=lowercase_ , batch_size=lowercase_ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: List[str] , lowercase_: int , lowercase_: int , lowercase_: List[str] , lowercase_: Dict=None ) -> List[Any]:
A__ : List[Any] = []
for epoch in range(lowercase_ ):
# Train quickly
model.train()
for batch in dataloader:
A__ , A__ : Any = batch
A__ : Any = model(lowercase_ )
A__ : Any = torch.nn.functional.mse_loss(lowercase_ , lowercase_ )
accelerator.backward(lowercase_ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class _a (nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
A__ : str = nn.Parameter(torch.randn(1 ) )
A__ : Any = nn.Parameter(torch.randn(1 ) )
def __A ( self , A__ ):
return x * self.a + self.b
class _a (unittest.TestCase ):
'''simple docstring'''
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : str = dummy_dataloaders()
A__ : Dict = ProjectConfiguration(total_limit=1 , project_dir=A__ , automatic_checkpoint_naming=A__ )
# Train baseline
A__ : List[str] = Accelerator(project_config=A__ )
A__ , A__ , A__ , A__ : Any = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : str = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : int = dummy_dataloaders()
# Train baseline
A__ : str = Accelerator()
A__ , A__ , A__ , A__ : List[str] = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
A__ : List[Any] = os.path.join(A__ , """initial""" )
accelerator.save_state(A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Dict = optimizer.state_dict()
A__ : List[str] = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : str = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Optional[int] = DummyModel()
A__ : Dict = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Dict = dummy_dataloaders()
A__ : List[str] = Accelerator()
A__ , A__ , A__ , A__ : Optional[Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(A__ )
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : Union[str, Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : List[str] = train(2 , A__ , A__ , A__ , A__ )
# Save everything
A__ : Optional[int] = os.path.join(A__ , """checkpoint""" )
accelerator.save_state(A__ )
# Load everything back in and make sure all states work
accelerator.load_state(A__ )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Union[str, Any] = model.a.item(), model.b.item()
A__ : Optional[int] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : int = DummyModel()
A__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : List[str] = dummy_dataloaders()
A__ : str = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Any = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
((A__) , (A__)) : Tuple = model.a.item(), model.b.item()
A__ : int = optimizer.state_dict()
A__ : int = train(3 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[Any] = model.a.item(), model.b.item()
A__ : Any = optimizer.state_dict()
# Train partially
set_seed(42 )
A__ : Dict = DummyModel()
A__ : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ , A__ : Union[str, Any] = dummy_dataloaders()
A__ : List[Any] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A__ )
A__ : Dict = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ )
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : Tuple = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
A__ : str = train(2 , A__ , A__ , A__ , A__ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , A__ , A__ , A__ , A__ )
((A__) , (A__)) : Optional[int] = model.a.item(), model.b.item()
A__ : List[Any] = optimizer.state_dict()
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
self.assertEqual(A__ , A__ )
def __A ( self ):
A__ : Union[str, Any] = torch.tensor([1, 2, 3] )
A__ : int = torch.tensor([2, 3, 4] )
A__ : List[Any] = DummyModel()
A__ : List[Any] = torch.optim.Adam(net.parameters() )
A__ : Tuple = Accelerator()
with self.assertRaises(A__ ) as ve:
accelerator.register_for_checkpointing(A__ , A__ , A__ , A__ )
A__ : Any = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Any = DummyModel()
A__ : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1e-3 )
A__ : Dict = torch.optim.lr_scheduler.StepLR(A__ , step_size=1 , gamma=0.9_9 )
A__ , A__ : List[Any] = dummy_dataloaders()
A__ : Tuple = ProjectConfiguration(automatic_checkpoint_naming=A__ )
# Train baseline
A__ : Optional[Any] = Accelerator(project_dir=A__ , project_config=A__ )
A__ , A__ , A__ , A__ , A__ : Union[str, Any] = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Save initial
accelerator.save_state()
A__ : Tuple = scheduler.state_dict()
train(3 , A__ , A__ , A__ , A__ , A__ )
self.assertNotEqual(A__ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(A__ , scheduler.state_dict() )
def __A ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A__ : Optional[Any] = DummyModel()
A__ : int = ProjectConfiguration(automatic_checkpoint_naming=A__ , total_limit=2 )
# Train baseline
A__ : List[str] = Accelerator(project_dir=A__ , project_config=A__ )
A__ : Union[str, Any] = accelerator.prepare(A__ )
            # Save 11 states; with total_limit=2 only the last two checkpoints survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(A__ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def __A ( self ):
A__ : Dict = ["""torchrun""", F"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__ )]
execute_subprocess_async(A__ , env=os.environ.copy() )
if __name__ == "__main__":
A_ : List[str] = '/tmp/accelerate/state_checkpointing'
A_ : Optional[Any] = DummyModel()
A_ : Union[str, Any] = torch.optim.Adam(params=model.parameters(), lr=1E-3)
A_ : str = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
A_ , A_ : List[Any] = dummy_dataloaders()
A_ : int = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
A_ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
A_ , A_ , A_ , A_ , A_ : List[Any] = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
A_ , A_ : Dict = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert param_device.type == accelerator.device.type
A_ : Optional[Any] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
A_ : str = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
A_ : Tuple = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
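# Hedged sketch of the checkpointing API the tests above cover; the model and
# optimizer stand in for any objects passed through accelerator.prepare().
from accelerate import Accelerator
from accelerate.utils import ProjectConfiguration

accelerator = Accelerator(
    project_dir="ckpts", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
)
# ... prepare model/optimizer/dataloaders, train ...
accelerator.save_state()  # writes ckpts/checkpoints/checkpoint_0
accelerator.load_state("ckpts/checkpoints/checkpoint_0")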
| 64
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__lowercase = ['''gpt2''']
__lowercase = '''gpt2'''
if is_tf_available():
class lowerCamelCase_ ( tf.Module ):
'''simple docstring'''
def __init__( self , __lowercase) -> Optional[int]:
super().__init__()
__UpperCamelCase :Tuple = tokenizer
__UpperCamelCase :str = AutoConfig.from_pretrained(__lowercase)
__UpperCamelCase :Tuple = TFGPTaLMHeadModel.from_config(__lowercase)
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text'''),))
def UpperCamelCase__ ( self , __lowercase) -> Dict:
__UpperCamelCase :str = self.tokenizer(__lowercase)
__UpperCamelCase :str = tokenized['''input_ids'''].to_tensor()
__UpperCamelCase :Optional[int] = tf.cast(input_ids_dense > 0 , tf.intaa)
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__UpperCamelCase :Any = self.model(input_ids=__lowercase , attention_mask=__lowercase)['''logits''']
return outputs
@require_tf
@require_keras_nlp
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> Tuple:
super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # Convert both to numpy to avoid comparing ragged tensors directly
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # Set an arbitrary pad_token_id for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
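
# Hedged usage sketch (added for illustration, not part of the original test
# suite): TFGPT2Tokenizer runs tokenization inside the TF graph, which is what
# the graph-mode and saved-model tests above exercise. Assuming the "gpt2"
# checkpoint is available:
#
#     tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
#     outputs = tf_tokenizer(tf.constant(["TF-native tokenization"]))
#     print(outputs["input_ids"])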
| 167
|
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into a unit lower
    triangular matrix and an upper triangular matrix (no pivoting)."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Fill row i of the lower factor (entries left of the diagonal)
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Fill row i of the upper factor (diagonal and to the right)
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
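    # Quick sanity check (illustrative example added here, not from the original
    # file): for this matrix a Doolittle factorization exists and L @ U == table.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower_mat, upper_mat = lower_upper_decomposition(example)
    assert np.allclose(lower_mat @ upper_mat, example)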
| 167
| 1
|
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape an Amazon.in search results page for a product and return a
    DataFrame with the title, link, price, rating, MRP and discount."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                # Discount as a percentage of the MRP
                discount = float(
                    (
                        float(product_mrp.strip("₹").replace(",", ""))
                        - float(product_price.strip("₹").replace(",", ""))
                    )
                    / float(product_mrp.strip("₹").replace(",", ""))
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
    # Blank out inconsistent rows (price above MRP, or no MRP found)
    data_frame.loc[
        data_frame["Current Price of the product"] > data_frame["MRP of the product"],
        "MRP of the product",
    ] = " "
    data_frame.loc[
        data_frame["MRP of the product"] == "",
        "Discount",
    ] = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
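
# Worked example of the discount formula above (illustrative numbers only):
# an MRP of ₹1,000 and a current price of ₹800 give
# (1000 - 800) / 1000 * 100 = 20.0, i.e. a 20% discount.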
| 351
|
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # A single random 30x400 RGB image, converted to a PIL Image
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max(len(texts) for texts in input_texts)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
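
# Hedged usage sketch (added for illustration; mirrors the API exercised by the
# tests above; the `image` variable is an assumed PIL image, not defined here):
#
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
#     # -> dict with "input_ids", "attention_mask" and "pixel_values"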
| 351
| 1
|