code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Dict = logging.get_logger(__name__)
__lowerCamelCase :List[Any] = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class A__ ( _a):
"""simple docstring"""
snake_case__ : List[str] ='''megatron-bert'''
def __init__( self: Optional[int] , __a: Tuple=29_056 , __a: Dict=1_024 , __a: int=24 , __a: str=16 , __a: Dict=4_096 , __a: Union[str, Any]="gelu" , __a: List[Any]=0.1 , __a: int=0.1 , __a: int=512 , __a: Union[str, Any]=2 , __a: str=0.02 , __a: List[Any]=1e-1_2 , __a: Tuple=0 , __a: Optional[Any]="absolute" , __a: Any=True , **__a: Optional[Any] , )-> Tuple:
super().__init__(pad_token_id=_A , **_A )
lowerCamelCase : int = vocab_size
lowerCamelCase : str = hidden_size
lowerCamelCase : List[Any] = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : Union[str, Any] = hidden_act
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : Any = type_vocab_size
lowerCamelCase : Dict = initializer_range
lowerCamelCase : List[Any] = layer_norm_eps
lowerCamelCase : str = position_embedding_type
lowerCamelCase : List[Any] = use_cache
| 713
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: int , __a: Union[str, Any] , __a: Union[str, Any]=3 , __a: int=32 , __a: Any=3 , __a: List[str]=10 , __a: int=[10, 20, 30, 40] , __a: List[str]=[1, 1, 2, 1] , __a: str=True , __a: List[str]=True , __a: Dict="relu" , __a: int=3 , __a: Dict=None , )-> Optional[Any]:
lowerCamelCase : Tuple = parent
lowerCamelCase : List[Any] = batch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : List[Any] = num_channels
lowerCamelCase : Any = embeddings_size
lowerCamelCase : Union[str, Any] = hidden_sizes
lowerCamelCase : List[Any] = depths
lowerCamelCase : Union[str, Any] = is_training
lowerCamelCase : Any = use_labels
lowerCamelCase : str = hidden_act
lowerCamelCase : Optional[Any] = num_labels
lowerCamelCase : Tuple = scope
lowerCamelCase : List[str] = len(__UpperCamelCase )
def a__ ( self: Any )-> List[str]:
lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : str = None
if self.use_labels:
lowerCamelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def a__ ( self: str )-> int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def a__ ( self: Optional[int] , __a: int , __a: Tuple , __a: Union[str, Any] )-> Any:
lowerCamelCase : Any = TFRegNetModel(config=__UpperCamelCase )
lowerCamelCase : int = model(__UpperCamelCase , training=__UpperCamelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: Union[str, Any] , __a: int , __a: List[Any] , __a: List[str] )-> Any:
lowerCamelCase : List[Any] = self.num_labels
lowerCamelCase : Any = TFRegNetForImageClassification(__UpperCamelCase )
lowerCamelCase : Optional[int] = model(__UpperCamelCase , labels=__UpperCamelCase , training=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: int )-> str:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = config_and_inputs
lowerCamelCase : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
snake_case__ : List[str] =(
{'''feature-extraction''': TFRegNetModel, '''image-classification''': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
snake_case__ : List[str] =False
snake_case__ : Tuple =False
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : str =False
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : Any = TFRegNetModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def a__ ( self: Union[str, Any] )-> int:
return
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def a__ ( self: Optional[int] )-> Optional[int]:
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def a__ ( self: Tuple )-> Union[str, Any]:
super().test_keras_fit()
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def a__ ( self: Optional[Any] )-> str:
pass
def a__ ( self: Dict )-> Optional[int]:
lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : int = model_class(__UpperCamelCase )
lowerCamelCase : str = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def a__ ( self: Any )-> int:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def a__ ( self: int )-> List[Any]:
def check_hidden_states_output(__a: Union[str, Any] , __a: str , __a: Any ):
lowerCamelCase : Any = model_class(__UpperCamelCase )
lowerCamelCase : List[str] = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) , training=__UpperCamelCase )
lowerCamelCase : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(__UpperCamelCase ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Optional[int] = ["""basic""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCamelCase : Union[str, Any] = layer_type
lowerCamelCase : Optional[int] = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def a__ ( self: Optional[Any] )-> Tuple:
lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(__a: List[str] , __a: Optional[Any] , __a: Tuple , __a: List[Any]={} ):
lowerCamelCase : Optional[int] = model(__UpperCamelCase , return_dict=__UpperCamelCase , **__UpperCamelCase )
lowerCamelCase : Union[str, Any] = model(__UpperCamelCase , return_dict=__UpperCamelCase , **__UpperCamelCase ).to_tuple()
def recursive_check(__a: Union[str, Any] , __a: Dict ):
if isinstance(__UpperCamelCase , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__UpperCamelCase , __UpperCamelCase ):
recursive_check(__UpperCamelCase , __UpperCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(__UpperCamelCase , __UpperCamelCase ) ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
) , )
recursive_check(__UpperCamelCase , __UpperCamelCase )
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__UpperCamelCase )
lowerCamelCase : Any = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase : List[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase : List[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
lowerCamelCase : Optional[int] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
lowerCamelCase : Optional[Any] = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , {"""output_hidden_states""": True} )
lowerCamelCase : int = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
lowerCamelCase : Tuple = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
check_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , {"""output_hidden_states""": True} )
def a__ ( self: Union[str, Any] )-> Any:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def a__ ( self: List[Any] )-> Tuple:
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Dict = TFRegNetModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def snake_case ( ) -> Optional[int]:
lowerCamelCase : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
"""simple docstring"""
@cached_property
def a__ ( self: Union[str, Any] )-> Optional[Any]:
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a__ ( self: List[str] )-> int:
lowerCamelCase : Any = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCamelCase : str = self.default_image_processor
lowerCamelCase : str = prepare_img()
lowerCamelCase : Any = image_processor(images=__UpperCamelCase , return_tensors="""tf""" )
# forward pass
lowerCamelCase : Tuple = model(**__UpperCamelCase , training=__UpperCamelCase )
# verify the logits
lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
lowerCamelCase : Union[str, Any] = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 )
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Union[str, Any] = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :str = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 715
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
| 42
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict ) -> Tuple:
lowerCamelCase : Optional[Any] = a.name
lowerCamelCase : Any = b.name
lowerCamelCase : List[str] = """"""
lowerCamelCase : Optional[Any] = """"""
lowerCamelCase : Optional[Any] = a == b
lowerCamelCase : Optional[int] = name_a
lowerCamelCase : List[Any] = name_b
return res
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> Optional[Any]:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
_graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str ) -> List[str]:
for n in graph_proto.node:
_node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> str:
lowerCamelCase : Tuple = list(model.graph.initializer )
lowerCamelCase : List[str] = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowerCamelCase : Any = inits[i].name
lowerCamelCase : List[str] = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : Optional[Any] ) -> int:
lowerCamelCase : Dict = os.path.dirname(UpperCamelCase__ )
lowerCamelCase : Tuple = os.path.basename(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) )
lowerCamelCase : Any = list(model.graph.initializer )
lowerCamelCase : str = set()
lowerCamelCase : Optional[Any] = {}
lowerCamelCase : int = []
lowerCamelCase : List[Any] = 0
for i in range(len(UpperCamelCase__ ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(UpperCamelCase__ ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(UpperCamelCase__ )
dup_set.add(UpperCamelCase__ )
lowerCamelCase : Tuple = inits[j].data_type
lowerCamelCase : Dict = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print("""unexpected data type: """ , UpperCamelCase__ )
total_reduced_size += mem_size
lowerCamelCase : Optional[Any] = inits[i].name
lowerCamelCase : Tuple = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(UpperCamelCase__ )
else:
lowerCamelCase : Dict = [name_j]
ind_to_replace.append((j, i) )
print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" )
lowerCamelCase : List[str] = sorted(UpperCamelCase__ )
_remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : int = """optimized_""" + model_file_name
lowerCamelCase : Dict = os.path.join(UpperCamelCase__ , UpperCamelCase__ )
onnx.save(UpperCamelCase__ , UpperCamelCase__ )
return new_model
| 716
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float:
lowerCamelCase : Dict = u
for i in range(1 , UpperCamelCase__ ):
lowerCamelCase : List[str] = temp * (u - i)
return temp
def snake_case ( ) -> None:
    """Interactively perform Newton forward-difference interpolation.

    Reads the sample count, the x values, and the corresponding y values
    from stdin, builds the forward-difference table in place, and prints
    the interpolated value at the requested point.

    Fix vs. original: the body referenced the undefined names
    ``UpperCamelCase__`` (as range bounds, map callable, etc.) and the
    undefined helper ``ucal``; proper locals are restored and the
    coefficient is computed inline.
    """
    lowerCamelCase : int = int(input("""enter the numbers of values: """ ) )
    n = lowerCamelCase
    # y is the forward-difference table; column 0 holds the raw samples and
    # the remaining columns are filled in below.
    y: list[list[float]] = [[0.0 for _ in range(n )] for _ in range(n )]
    print("""enter the values of parameters in a list: """ )
    x = list(map(int , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    # Normalized offset from the first sample (assumes equally spaced x).
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1 , n ):
        # u * (u - 1) * ... * (u - i + 1): the Newton forward coefficient.
        ucoef = u
        for k in range(1 , i ):
            ucoef *= u - k
        summ += (ucoef * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
| 42
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
__lowerCamelCase :List[str] = logging.get_logger(__name__)
# Maps fairseq parameter-name fragments to their transformers UniSpeech
# counterparts; "*" stands for the encoder layer index and is substituted
# during weight loading.
__lowerCamelCase :str = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Target keys that live at the top level of the transformers model; the
# "unispeech." prefix is NOT prepended for these during conversion.
__lowerCamelCase :List[str] = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def snake_case ( hf_pointer , key , value , full_name , weight_type , is_finetuned ):
    """Assign ``value`` into ``hf_pointer`` at the dotted attribute path ``key``.

    Walks attribute-by-attribute along ``key`` and writes ``value`` into the
    ``weight``/``weight_g``/``weight_v``/``bias`` slot selected by
    ``weight_type`` (or directly into ``.data`` when ``weight_type`` is
    None). For fine-tuned models, pretraining-only layers are skipped and
    ``ctc_proj`` is renamed to ``lm_head``.

    Fix vs. original: the signature declared six parameters all named
    ``UpperCamelCase__`` (a SyntaxError) and the locals were collapsed to
    one name; distinct names are restored, positional order matching the
    call site in the weight loader.
    """
    for attribute in key.split(""".""" ):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = """lm_head"""
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def snake_case ( fairseq_model , hf_model , is_finetuned ):
    """Copy every weight from a fairseq UniSpeech model into ``hf_model``.

    Conv feature-extractor weights are routed through ``load_conv_layer``;
    all other weights are matched against MAPPING and written with
    ``set_recursively``. Unmatched weights are collected and logged.

    Fix vs. original: the signature declared three parameters all named
    ``UpperCamelCase__`` (a SyntaxError); distinct names are restored.
    NOTE(review): ``set_recursively`` / ``load_conv_layer`` / ``MAPPING`` /
    ``TOP_LEVEL_KEYS`` are the upstream module-level names; in this mangled
    module the corresponding definitions were renamed — confirm they resolve.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key )[0].split(""".""" )[-2]
                        mapped_key = mapped_key.replace("""*""" , layer_index )
                    if "weight_g" in name:
                        weight_type = """weight_g"""
                    elif "weight_v" in name:
                        weight_type = """weight_v"""
                    elif "bias" in name:
                        weight_type = """bias"""
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = """weight"""
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type , is_finetuned )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'Unused weights: {unused_weights}' )
def snake_case ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Route one fairseq conv-feature-extractor weight into the HF model.

    ``full_name`` is the original fairseq parameter name; the layer id and
    kind (conv weight/bias vs. layer-norm weight/bias) are parsed out of it
    and the tensor is copied into the matching HF conv layer. Anything that
    does not match the expected layout is appended to ``unused_weights``.

    Fix vs. original: the signature declared five parameters all named
    ``UpperCamelCase__`` (a SyntaxError) and the target assignments were
    collapsed to a single local; both are restored.
    """
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        # type 0: the convolution itself
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the (group/layer) norm attached to the conv layer
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F'{full_name} has size {value.shape}, but'
                F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def snake_case ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    """Convert a fairseq UniSpeech checkpoint to the transformers format.

    Builds a UniSpeech config (from ``config_path`` or defaults), optionally
    writes the tokenizer/feature-extractor artifacts for fine-tuned CTC
    models from the fairseq ``dict_path``, loads the fairseq model, copies
    its weights, and saves the converted model to
    ``pytorch_dump_folder_path``.

    Fix vs. original: the signature declared five parameters all named
    ``UpperCamelCase__`` (a SyntaxError); distinct names are restored,
    matching the positional order used at the CLI call site.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path )
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = WavaVecaPhonemeCTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            # Layer-norm feature extraction requires an attention mask.
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_unispeech = UniSpeechForCTC(config )
    else:
        hf_unispeech = UniSpeechForPreTraining(config )
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    model = model[0].eval()
    # NOTE(review): the upstream weight-loading helper is named
    # `recursively_load_weights`; in this module the def was renamed —
    # confirm the name resolves.
    recursively_load_weights(model , hf_unispeech , is_finetuned )
    hf_unispeech.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    # NOTE(review): the parser is bound to `__lowerCamelCase` but used as
    # `parser`, the args to `__lowerCamelCase` but used as `args`, and the
    # converter is invoked as `convert_unispeech_checkpoint` while the def
    # above is named `snake_case` — these names look out of sync; confirm.
    __lowerCamelCase :Any = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    __lowerCamelCase :int = parser.parse_args()
    # `--not_finetuned` is inverted here: absence of the flag means finetuned.
    convert_unispeech_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 717
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Import structure consumed by _LazyModule below; the tokenizer is only
# exposed when sentencepiece is installed.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the original bound this list to a throwaway module variable, so
    # `_import_structure` (referenced by _LazyModule below) was never
    # defined and importing the package raised NameError.
    _import_structure["tokenization_gpt_swa"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    # Fix: replace this module in sys.modules with the lazy proxy (the
    # original assigned the proxy to an unused variable, which has no effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( UpperCamelCase_ , unittest.TestCase):
    """Fast (CPU-sized) unit tests for KandinskyVaaInpaintPipeline."""
    # NOTE(review): the base name `UpperCamelCase_` is not defined in this
    # module; `PipelineTesterMixin` (imported above) looks intended — confirm.
    # NOTE(review): every method below is named `a__`, so later defs shadow
    # earlier ones at class-creation time — artifact of renaming; the
    # upstream file uses distinct property/test names.

    # Pipeline class under test and the argument sets the shared tester uses.
    snake_case__ : str =KandinskyVaaInpaintPipeline
    snake_case__ : Optional[int] =['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
    snake_case__ : int =[
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
        '''mask_image''',
    ]
    snake_case__ : List[str] =[
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    snake_case__ : Tuple =False

    @property
    def a__ ( self: Dict )-> Optional[Any]:
        # Hidden size of the dummy text embedder.
        return 32

    @property
    def a__ ( self: str )-> Optional[int]:
        # Time-embedding input dimension for the dummy UNet.
        return 32

    @property
    def a__ ( self: str )-> Dict:
        return self.time_input_dim

    @property
    def a__ ( self: Any )-> int:
        return self.time_input_dim * 4

    @property
    def a__ ( self: Any )-> Union[str, Any]:
        return 100

    @property
    def a__ ( self: Any )-> Optional[Any]:
        # Tiny UNet with image-embedding conditioning, seeded for determinism.
        torch.manual_seed(0 )
        lowerCamelCase : Dict = {
            '''in_channels''': 9,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        lowerCamelCase : List[Any] = UNetaDConditionModel(**__a )
        return model

    @property
    def a__ ( self: Optional[int] )-> int:
        # Config for the tiny VQ autoencoder ("movq") used in these tests.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def a__ ( self: List[str] )-> Optional[int]:
        torch.manual_seed(0 )
        lowerCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
        return model

    def a__ ( self: Optional[int] )-> Union[str, Any]:
        # Assemble the pipeline components (unet + scheduler + movq).
        lowerCamelCase : Optional[int] = self.dummy_unet
        lowerCamelCase : str = self.dummy_movq
        lowerCamelCase : List[Any] = DDIMScheduler(
            num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=__a , set_alpha_to_one=__a , steps_offset=1 , prediction_type="""epsilon""" , thresholding=__a , )
        lowerCamelCase : Any = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def a__ ( self: Any , __a: int , __a: List[Any]=0 )-> Union[str, Any]:
        # Build deterministic dummy inputs: image embeds, a 64x64 init image
        # upscaled to 256x256, and an all-ones mask with one masked pixel.
        lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            __a )
        # create init_image
        lowerCamelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowerCamelCase : List[str] = Image.fromarray(np.uinta(__a ) ).convert("""RGB""" ).resize((256, 256) )
        # create mask
        lowerCamelCase : Tuple = np.ones((64, 64) , dtype=np.floataa )
        lowerCamelCase : int = 0
        if str(__a ).startswith("""mps""" ):
            # mps does not support device-bound generators.
            lowerCamelCase : int = torch.manual_seed(__a )
        else:
            lowerCamelCase : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : List[str] = {
            '''image''': init_image,
            '''mask_image''': mask,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 4.0,
            '''output_type''': '''np''',
        }
        return inputs

    def a__ ( self: int )-> Optional[Any]:
        # Smoke test: run the pipeline on CPU and compare a corner slice
        # against recorded reference values; also checks dict vs. tuple
        # return paths agree.
        lowerCamelCase : Any = '''cpu'''
        lowerCamelCase : Dict = self.get_dummy_components()
        lowerCamelCase : Dict = self.pipeline_class(**__a )
        lowerCamelCase : Union[str, Any] = pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : List[Any] = pipe(**self.get_dummy_inputs(__a ) )
        lowerCamelCase : Tuple = output.images
        lowerCamelCase : List[str] = pipe(
            **self.get_dummy_inputs(__a ) , return_dict=__a , )[0]
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        lowerCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        print(f'image.shape {image.shape}' )
        assert image.shape == (1, 64, 64, 3)
        lowerCamelCase : int = np.array(
            [0.50_77_59_03, 0.49_52_71_95, 0.48_82_45_43, 0.50_19_22_37, 0.48_64_49_06, 0.49_37_38_14, 0.4_78_05_98, 0.47_23_48_27, 0.48_32_78_48] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'

    def a__ ( self: Optional[int] )-> str:
        # Batched vs. single inference should match within a loose tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    """Slow GPU integration test for Kandinsky 2.2 inpainting (prior +
    decoder pipelines against a stored reference image)."""
    # NOTE(review): both methods below are named `a__`, so the second shadows
    # the first at class-creation time — artifact of renaming; confirm.

    def a__ ( self: Union[str, Any] )-> int:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self: Union[str, Any] )-> int:
        # End-to-end: run the prior to get image embeds, then inpaint a hat
        # onto the cat image and compare with the recorded fp16 output.
        lowerCamelCase : Tuple = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" )
        lowerCamelCase : List[Any] = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        lowerCamelCase : int = np.ones((768, 768) , dtype=np.floataa )
        lowerCamelCase : Dict = 0
        lowerCamelCase : Optional[int] = '''a hat'''
        lowerCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(__a )
        lowerCamelCase : Optional[Any] = KandinskyVaaInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa )
        lowerCamelCase : int = pipeline.to(__a )
        pipeline.set_progress_bar_config(disable=__a )
        lowerCamelCase : Dict = torch.Generator(device="""cpu""" ).manual_seed(0 )
        lowerCamelCase : List[str] = pipe_prior(
            __a , generator=__a , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        lowerCamelCase : List[Any] = pipeline(
            image=__a , mask_image=__a , image_embeds=__a , negative_image_embeds=__a , generator=__a , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , )
        lowerCamelCase : Tuple = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(__a , __a )
| 718
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger for progress output during LeViT conversion.
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( hidden_sizes , name , config , save_directory , push_to_hub=True ):
    """Convert one pretrained timm LeViT checkpoint into transformers format.

    Loads the matching timm reference model for ``hidden_sizes``/``name``,
    copies its state dict key-by-key (the two state dicts are
    position-aligned) into a fresh LevitForImageClassificationWithTeacher,
    verifies both models produce identical logits on a random input, and —
    when ``push_to_hub`` — saves model + image processor under
    ``save_directory / name``.

    Fix vs. original: the signature declared five parameters all named
    ``UpperCamelCase__`` (a SyntaxError), passed an undefined name as timm's
    ``pretrained`` flag, and dropped the per-key weight-copy assignment.
    """
    print(F'Converting {name}...' )
    with torch.no_grad():
        # Pick the timm reference model matching this configuration.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""" , pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""" , pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""" , pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""" , pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""" , pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ) , len(new_keys ) )
        # The two state dicts are assumed position-aligned key-for-key.
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        x = torch.randn((2, 3, 224, 224) )
        out1 = from_model(x )
        out2 = our_model(x ).logits
        assert torch.allclose(out1 , out2 ), "The model logits don't match the original one."
        checkpoint_name = name
        print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'Pushed {checkpoint_name}' )
def snake_case ( save_directory , model_name=None , push_to_hub=True ):
    """Convert one (or all) LeViT checkpoints; return (config, expected_shape).

    Downloads the ImageNet-1k label maps, builds a config factory with them,
    and converts either the single ``model_name`` or every known LeViT
    variant into ``save_directory``.

    Fixes vs. original: the signature declared three parameters all named
    ``UpperCamelCase__`` (a SyntaxError); the label maps were passed to the
    config under the mangled keyword names instead of ``id2label`` /
    ``label2id``; and ``config`` was unbound on the single-model branch,
    making the trailing ``return`` raise NameError.
    """
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # All LeViT configs share the ImageNet label maps.
    ImageNetPreTrainedConfig = partial(LevitConfig , num_labels=num_labels , id2label=idalabel , label2id=labelaid )
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    # NOTE(review): `convert_weight_and_push` is the upstream helper name;
    # in this module the helper def was renamed — confirm it resolves.
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , model_name , config , save_directory , push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point for the LeViT conversion script.
    # NOTE(review): the parser is bound to `__lowerCamelCase` but used as
    # `parser`, the parsed args likewise as `args`, and
    # `convert_weights_and_push` is invoked while the def above is named
    # `snake_case` — these names look out of sync; confirm.
    __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    __lowerCamelCase :List[Any] = parser.parse_args()
    __lowerCamelCase :Path = args.pytorch_dump_folder_path
    # Ensure the dump directory exists before conversion starts.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 0
|
"""simple docstring"""
import numpy as np
from transformers import Pipeline
def snake_case ( UpperCamelCase__ : Optional[Any] ) -> str:
lowerCamelCase : str = np.max(UpperCamelCase__ , axis=-1 , keepdims=UpperCamelCase__ )
lowerCamelCase : Any = np.exp(outputs - maxes )
return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=UpperCamelCase__ )
class A__ ( lowercase_):
    """Text-pair classification pipeline: tokenize one or two texts together
    and return the best class label, its score, and the raw logits."""
    # NOTE(review): the base name `lowercase_` is not defined in this module;
    # `Pipeline` (imported above) looks intended — confirm.
    # NOTE(review): all four methods are named `a__`; upstream these are the
    # Pipeline hooks _sanitize_parameters / preprocess / _forward /
    # postprocess — later defs shadow earlier ones here.

    def a__ ( self: Tuple , **__a: int )-> Dict:
        # Split pipeline kwargs: `second_text` is forwarded to preprocessing.
        lowerCamelCase : List[Any] = {}
        if "second_text" in kwargs:
            lowerCamelCase : List[str] = kwargs["""second_text"""]
        return preprocess_kwargs, {}, {}

    def a__ ( self: List[str] , __a: str , __a: Any=None )-> Optional[Any]:
        # Tokenize the text (pair) into framework-specific tensors.
        return self.tokenizer(__a , text_pair=__a , return_tensors=self.framework )

    def a__ ( self: Tuple , __a: Optional[int] )-> Tuple:
        # Forward pass through the underlying model.
        return self.model(**__a )

    def a__ ( self: int , __a: Dict )-> Union[str, Any]:
        # Convert logits to probabilities and pick the best class.
        lowerCamelCase : int = model_outputs.logits[0].numpy()
        lowerCamelCase : Optional[Any] = softmax(__a )
        lowerCamelCase : Any = np.argmax(__a )
        lowerCamelCase : Dict = self.model.config.idalabel[best_class]
        lowerCamelCase : Tuple = probabilities[best_class].item()
        lowerCamelCase : int = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 719
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
    """Unit tests for KDPMaDiscreteScheduler: config sweeps plus full
    denoising loops compared against recorded sum/mean reference values."""
    # NOTE(review): the base name `__lowercase` is not defined in this
    # module; `SchedulerCommonTest` (imported above) looks intended — confirm.
    # NOTE(review): several methods below share the name `a__`, so later
    # defs shadow earlier ones — artifact of renaming.

    snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
    snake_case__ : Tuple =10

    def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
        # Default scheduler config; callers can override any field via kwargs.
        lowerCamelCase : int = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**__a )
        return config

    def a__ ( self: Union[str, Any] )-> Any:
        # Sweep over training-timestep counts.
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=__a )

    def a__ ( self: str )-> int:
        # Sweep over paired beta_start/beta_end values.
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )

    def a__ ( self: int )-> Union[str, Any]:
        # Sweep over supported beta schedules.
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__a )

    def a__ ( self: List[Any] )-> List[Any]:
        # Sweep over prediction types.
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def a__ ( self: Union[str, Any] )-> int:
        # Full denoising loop with prediction_type="v_prediction"; compares
        # abs-sum/abs-mean of the final sample against recorded references.
        lowerCamelCase : List[str] = self.scheduler_classes[0]
        lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
        lowerCamelCase : List[str] = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCamelCase : Dict = self.dummy_model()
        lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCamelCase : List[Any] = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
            lowerCamelCase : Optional[int] = model(__a , __a )
            lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
            lowerCamelCase : Optional[Any] = output.prev_sample
        lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
        lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 0.00_02 ) < 1e-3

    def a__ ( self: Any )-> Any:
        # Same loop with the default (epsilon) config; skipped on mps.
        if torch_device == "mps":
            return
        lowerCamelCase : Dict = self.scheduler_classes[0]
        lowerCamelCase : Dict = self.get_scheduler_config()
        lowerCamelCase : int = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCamelCase : List[Any] = self.dummy_model()
        lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCamelCase : Optional[int] = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
            lowerCamelCase : Optional[Any] = model(__a , __a )
            lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
            lowerCamelCase : str = output.prev_sample
        lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
        lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3

    def a__ ( self: Optional[Any] )-> List[Any]:
        # Same loop but with timesteps placed on the target device up front;
        # skipped on mps.
        if torch_device == "mps":
            return
        lowerCamelCase : Any = self.scheduler_classes[0]
        lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        lowerCamelCase : Optional[Any] = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps , device=__a )
        lowerCamelCase : Union[str, Any] = self.dummy_model()
        lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
            lowerCamelCase : Optional[int] = model(__a , __a )
            lowerCamelCase : int = scheduler.step(__a , __a , __a )
            lowerCamelCase : int = output.prev_sample
        lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
        lowerCamelCase : int = torch.mean(torch.abs(__a ) )
        if str(__a ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Import structure consumed by _LazyModule below; the torch-only modeling
# classes are registered only when torch is installed.
_import_structure = {
    'configuration_time_series_transformer': [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'TimeSeriesTransformerConfig',
    ],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the original bound this list to a throwaway module variable, so
    # `_import_structure` (referenced by _LazyModule below) was never
    # defined — importing the package raised NameError — and the torch-only
    # symbols were never registered.
    _import_structure['modeling_time_series_transformer'] = [
        'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimeSeriesTransformerForPrediction',
        'TimeSeriesTransformerModel',
        'TimeSeriesTransformerPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )
else:
    import sys

    # Fix: replace this module in sys.modules with the lazy proxy (the
    # original assigned the proxy to an unused variable, which has no effect).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make torch/CUDA kernels deterministic so the hard-coded image-slice
# expectations in the tests below are reproducible across runs.
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Fast, tiny-component tests for ``StableDiffusionXLImgaImgPipeline``.

    NOTE(review): this block is machine-garbled and cannot run as written:
    * the base classes ``__lowercase`` are undefined names;
    * every class attribute is assigned to the single name ``snake_case__``,
      so each assignment overwrites the previous one;
    * every local is bound to the single name ``lowerCamelCase`` while later
      references use the undefined name ``__a`` (NameError in the
      zero-argument methods);
    * the dummy-inputs method declares two parameters both named ``__a`` and
      the annotated parenthesized tuple-unpacking below is a SyntaxError.
    The code is left byte-identical; inline flags mark the key sites.
    """
    snake_case__ : str =StableDiffusionXLImgaImgPipeline
    snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
    snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS
    def a__ ( self: List[str] )-> int:
        # Builds the tiny UNet/scheduler/VAE/CLIP components shared by the tests.
        torch.manual_seed(0 )
        lowerCamelCase : Any = UNetaDConditionModel(
            # NOTE(review): `use_linear_projection=__a` — `__a` is undefined in
            # this zero-argument method (presumably `True` upstream; confirm).
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        lowerCamelCase : Any = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        lowerCamelCase : Any = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
        # NOTE(review): the `__a` arguments below are undefined names —
        # upstream these were the CLIP config above / a boolean for
        # `local_files_only`.
        lowerCamelCase : Dict = CLIPTextModel(__a )
        lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
        lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        # NOTE(review): none of the names used as dict values below were ever
        # bound in this garbled version (everything went to `lowerCamelCase`).
        lowerCamelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
        # NOTE(review): duplicate parameter name `__a` — SyntaxError. The
        # upstream signature was presumably (self, device, seed=0).
        lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : Any = image / 2 + 0.5
        if str(__a ).startswith("""mps""" ):
            lowerCamelCase : Dict = torch.manual_seed(__a )
        else:
            lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Tuple = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs
    def a__ ( self: Dict )-> Optional[Any]:
        # Smoke test: 2 inference steps on CPU, compare a 3x3 output slice.
        # NOTE(review): `self.get_dummy_components` / `self.get_dummy_inputs`
        # cannot resolve — every method here is garbled to the name `a__`.
        lowerCamelCase : Any = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : int = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
        lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def a__ ( self: Optional[int] )-> Union[str, Any]:
        # Delegates to the mixin's attention-slicing equivalence check.
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def a__ ( self: Optional[Any] )-> str:
        # Delegates to the mixin's batched-vs-single equivalence check.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def a__ ( self: List[str] )-> Optional[Any]:
        # Deliberate no-op placeholder.
        pass
    def a__ ( self: List[Any] )-> Union[str, Any]:
        # Checks prompt-string vs precomputed-embedding paths give equal images.
        lowerCamelCase : Tuple = self.get_dummy_components()
        lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : str = sd_pipe.to(__a )
        lowerCamelCase : Any = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        # forward without prompt embeds
        lowerCamelCase : Dict = self.get_dummy_inputs(__a )
        lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Optional[int] = negative_prompt
        lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
        lowerCamelCase : List[Any] = sd_pipe(**__a )
        lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
        lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        # NOTE(review): an annotated assignment to a parenthesized tuple target
        # is a SyntaxError, and all four values returned by `encode_prompt`
        # would collapse into the single name `lowerCamelCase` anyway.
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
        lowerCamelCase : int = sd_pipe(
            **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
        lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    """Slow GPU integration check: load SD2-base via ``DiffusionPipeline`` and
    compare a deterministic output slice against hard-coded expectations.

    NOTE(review): reconstructed from a machine-garbled block — ``get_inputs``
    declared four parameters all named ``__a`` (a SyntaxError), its default
    used the nonexistent ``torch.floataa``, and the test body referenced the
    undefined name ``__a``. Names restored from how the values are used.
    """

    def a__( self: Dict )-> str:
        # Release GPU memory between tests.
        # NOTE(review): named `a__` like the other methods, so unittest never
        # calls this automatically as tearDown.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs( self , device , generator_device="cpu" , dtype=torch.float32 , seed=0 ):
        """Build a reproducible kwargs dict for a text-to-image pipeline call.

        Fixed latents + a seeded generator make the output deterministic.
        """
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def a__( self: Optional[int] )-> List[str]:
        # End-to-end: load the pipeline, run 3 steps, compare a 3x3 slice.
        pipe = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class A__ :
    """Config/input builder ("ModelTester") for the TF Flaubert model tests.

    NOTE(review): machine-garbled and not runnable as written:
    * ``__init__`` binds every field to the local name ``lowerCamelCase``
      instead of ``self.<field>`` attributes (which the other methods read via
      ``self.batch_size`` etc.), and ``parent`` is undefined — the parameter
      is named ``__a``;
    * every ``create_and_check_*`` method declares duplicate ``__a``
      parameters (a SyntaxError) and references the undefined
      ``_SCREAMING_SNAKE_CASE``;
    * ``tf.floataa`` is not a real dtype attribute;
    * the annotated parenthesized tuple-unpacking at the bottom is a
      SyntaxError.
    The code is left byte-identical; inline flags mark the key sites.
    """
    def __init__( self: Union[str, Any] , __a: Any , )-> List[Any]:
        # NOTE(review): `parent` undefined (parameter is `__a`); all fields
        # below should be `self.<name>` attributes.
        lowerCamelCase : Dict = parent
        lowerCamelCase : List[str] = 13
        lowerCamelCase : Optional[int] = 7
        lowerCamelCase : str = True
        lowerCamelCase : Optional[Any] = True
        lowerCamelCase : Tuple = True
        lowerCamelCase : Optional[Any] = True
        lowerCamelCase : str = True
        lowerCamelCase : int = False
        lowerCamelCase : Any = False
        lowerCamelCase : Union[str, Any] = False
        lowerCamelCase : Tuple = 2
        lowerCamelCase : Union[str, Any] = 99
        lowerCamelCase : int = 0
        lowerCamelCase : int = 32
        lowerCamelCase : str = 2
        lowerCamelCase : Optional[Any] = 4
        lowerCamelCase : Optional[Any] = 0.1
        lowerCamelCase : Optional[int] = 0.1
        lowerCamelCase : List[Any] = 512
        lowerCamelCase : Optional[int] = 16
        lowerCamelCase : Optional[Any] = 2
        lowerCamelCase : int = 0.02
        lowerCamelCase : Union[str, Any] = 3
        lowerCamelCase : List[Any] = 4
        lowerCamelCase : List[str] = """last"""
        lowerCamelCase : List[Any] = True
        lowerCamelCase : Optional[Any] = None
        lowerCamelCase : str = 0
    def a__ ( self: int )-> Tuple:
        # Builds the (config, inputs...) tuple consumed by create_and_check_*.
        lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
        lowerCamelCase : Tuple = None
        if self.use_input_lengths:
            lowerCamelCase : Optional[Any] = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        lowerCamelCase : Dict = None
        if self.use_token_type_ids:
            lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        lowerCamelCase : Optional[Any] = None
        lowerCamelCase : Dict = None
        lowerCamelCase : Any = None
        if self.use_labels:
            lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
            lowerCamelCase : str = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase : Dict = FlaubertConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
        # NOTE(review): none of the names returned below were bound above
        # (everything was assigned to `lowerCamelCase`).
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def a__ ( self: Tuple , __a: Optional[int] , __a: str , __a: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any] , __a: Tuple , __a: int , __a: Tuple , __a: Optional[int] , )-> Optional[Any]:
        # NOTE(review): duplicate `__a` parameters (SyntaxError) and undefined
        # `_SCREAMING_SNAKE_CASE` — the same pattern repeats in each
        # create_and_check_* method below.
        lowerCamelCase : Union[str, Any] = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE )
        lowerCamelCase : Tuple = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        lowerCamelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
        lowerCamelCase : Union[str, Any] = [input_ids, input_mask]
        lowerCamelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def a__ ( self: Union[str, Any] , __a: Any , __a: Tuple , __a: str , __a: List[Any] , __a: int , __a: Dict , __a: Dict , __a: Tuple , __a: int , )-> Tuple:
        # Exercises the LM-head variant.
        lowerCamelCase : Any = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
        lowerCamelCase : Optional[int] = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
        lowerCamelCase : List[str] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def a__ ( self: Union[str, Any] , __a: str , __a: Union[str, Any] , __a: Optional[int] , __a: List[Any] , __a: List[str] , __a: Optional[Any] , __a: Tuple , __a: Dict , __a: str , )-> Tuple:
        # Exercises the simple QA head (start/end logits).
        lowerCamelCase : Any = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
        lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """lengths""": input_lengths}
        lowerCamelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def a__ ( self: Optional[int] , __a: Optional[Any] , __a: str , __a: int , __a: Optional[Any] , __a: int , __a: Tuple , __a: str , __a: Optional[Any] , __a: Optional[int] , )-> Union[str, Any]:
        # Exercises the sequence-classification head.
        lowerCamelCase : List[Any] = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
        lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """lengths""": input_lengths}
        lowerCamelCase : Dict = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def a__ ( self: Any , __a: List[str] , __a: List[str] , __a: List[Any] , __a: List[str] , __a: str , __a: List[str] , __a: str , __a: List[Any] , __a: str , )-> Tuple:
        # Exercises the token-classification head.
        lowerCamelCase : int = self.num_labels
        lowerCamelCase : Dict = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
        lowerCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def a__ ( self: List[Any] , __a: List[str] , __a: List[Any] , __a: str , __a: int , __a: Any , __a: List[Any] , __a: str , __a: Optional[Any] , __a: Dict , )-> Any:
        # Exercises the multiple-choice head (inputs tiled across choices).
        lowerCamelCase : Union[str, Any] = self.num_choices
        lowerCamelCase : Any = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
        lowerCamelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase : List[Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase : Tuple = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
        lowerCamelCase : str = {
            """input_ids""": multiple_choice_inputs_ids,
            """attention_mask""": multiple_choice_input_mask,
            """token_type_ids""": multiple_choice_token_type_ids,
        }
        lowerCamelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def a__ ( self: int )-> int:
        # Repackages prepare_config_and_inputs() into (config, inputs_dict).
        lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        # NOTE(review): annotated assignment to a parenthesized tuple target —
        # a SyntaxError; all nine values would collapse into `lowerCamelCase`.
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Any = config_and_inputs
        lowerCamelCase : Any = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """langs""": token_type_ids,
            """lengths""": input_lengths,
        }
        return config, inputs_dict
@require_tf
class A__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common model/pipeline test harness for the TF Flaubert architecture.

    NOTE(review): reconstructed from a machine-garbled block — the original
    bases were the undefined name ``snake_case__`` (twice), every class
    attribute was bound to ``snake_case__`` (each overwriting the last), every
    method was named ``a__``, the setUp body stored nothing on ``self``, and
    bodies referenced the undefined ``_SCREAMING_SNAKE_CASE``. Attribute and
    method names restored to those the mixins expect.
    ``TFFlaubertModelTester`` must be the name of the tester class defined
    above (currently also garbled to ``A__``) — confirm when de-garbling it.
    """

    # Attribute names below are read by TFModelTesterMixin / PipelineTesterMixin.
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            """feature-extraction""": TFFlaubertModel,
            """fill-mask""": TFFlaubertWithLMHeadModel,
            """question-answering""": TFFlaubertForQuestionAnsweringSimple,
            """text-classification""": TFFlaubertForSequenceClassification,
            """token-classification""": TFFlaubertForTokenClassification,
            """zero-shot""": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        # Skip QA pipeline tests when a slow tokenizer would be used.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("""Fast""" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp( self ):
        # The mixins read `self.model_tester` / `self.config_tester`.
        self.model_tester = TFFlaubertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=FlaubertConfig , emb_dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_flaubert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs )

    def test_flaubert_lm_head( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs )

    def test_flaubert_qa( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs )

    def test_flaubert_sequence_classif( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
    """Integration test: run the small cased TF Flaubert checkpoint on a
    fixed sentence and compare a slice of the hidden states.

    NOTE(review): reconstructed from a machine-garbled block — the body
    referenced the undefined ``_SCREAMING_SNAKE_CASE`` three times, and used
    the nonexistent dtypes ``tf.intaa`` / ``tf.floataa`` (restored to
    ``tf.int32`` / ``tf.float32``, the only integer/float dtypes consistent
    with token ids and hidden states here — confirm against upstream).
    """

    @slow
    def test_output_embeds_base_model( self ):
        model = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"

        output = model(input_ids )[0]
        expected_shape = tf.TensorShape((1, 8, 512) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8_76_87_73, -1.56_65_55, 0.27_07_24_18],
                    [-1.6_92_00_38, -0.5_87_35_05, 1.9_32_95_99],
                    [-2.9_56_39_85, -1.6_99_38_35, 1.7_97_20_52],
                ]
            ] , dtype=tf.float32 , )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 721
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub model whose ``forward`` takes BERT's inputs in contiguous order.

    Only the signature matters: ``ensure_valid_input`` (exercised below)
    inspects ``model.forward`` to reorder the tokenizer outputs.
    NOTE(review): the original was ``class A__`` with a method ``a__`` whose
    three parameters were all named ``__a`` (a SyntaxError); the name and
    signature are restored from the ``FuncContiguousArgs()`` call site and
    the ordering asserted by the test below.
    """

    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    """Stub model whose ``forward`` interleaves a non-model argument between
    ``input_ids`` and the remaining BERT inputs.

    Used by the ``ensure_valid_input`` test below to check that generated
    args stop at the first parameter the tokenizer cannot supply.
    NOTE(review): the original was ``class A__`` with a method ``a__`` whose
    four parameters were all named ``__a`` (a SyntaxError); the name and
    signature are restored from the call site and the test's expectation
    that only ``input_ids`` survives the reordering.
    """

    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class A__ ( unittest.TestCase):
    """Tests for the graph-to-ONNX conversion utilities (export, quantize,
    shape inference, argument reordering, filename generation).

    NOTE(review): machine-garbled and not runnable as written:
    * the model list is bound to ``snake_case__`` but read back as
      ``OnnxExportTestCase.MODEL_TO_TEST`` — and this class is named ``A__``,
      not ``OnnxExportTestCase`` (NameError);
    * loop bodies pass the undefined name ``__a`` instead of the loop
      variables;
    * the export helper is named ``a__`` so ``self._test_export`` cannot
      resolve, and it declares duplicate ``__a`` parameters (a SyntaxError);
    * annotated assignments to comma-separated targets (e.g. the
      ``infer_shapes`` unpack) are SyntaxErrors, and all targets share the
      single name ``lowerCamelCase`` anyway.
    The code is left byte-identical; inline flags mark the key sites.
    """
    snake_case__ : Optional[Any] =[
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> int:
        # Export each listed model with the TF backend at opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """tf""" , 12 , **__a )
    @require_torch
    @slow
    def a__ ( self: str )-> int:
        # Export each listed model with the PyTorch backend at opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """pt""" , 12 , **__a )
    @require_torch
    @slow
    def a__ ( self: Union[str, Any] )-> Dict:
        # Export a freshly-built BERT with a custom vocab/tokenizer.
        from transformers import BertModel
        lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__a ) )
            vocab_file.flush()
            lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
            model.save_pretrained(__a )
            self._test_export(__a , """pt""" , 12 , __a )
    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> Optional[int]:
        # Quantize the TF export and check it did not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
            lowerCamelCase : Tuple = quantize(Path(__a ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    @require_torch
    @slow
    def a__ ( self: Any )-> Optional[int]:
        # Quantize the PyTorch export and check it did not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
            lowerCamelCase : Dict = quantize(__a )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
        # Shared export helper.
        # NOTE(review): duplicate `__a` parameters (SyntaxError); upstream
        # signature was (self, model, framework, opset, tokenizer=None, **kwargs).
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__a , __a , __a , __a , __a , **__a )
            return path
        except Exception as e:
            self.fail(__a )
    @require_torch
    @require_tokenizers
    @slow
    def a__ ( self: Tuple )-> Dict:
        # Dynamic-axis inference on a tiny PyTorch BERT.
        from transformers import BertModel
        lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """pt""" )
    @require_tf
    @require_tokenizers
    @slow
    def a__ ( self: Optional[Any] )-> List[Any]:
        # Dynamic-axis inference on a tiny TF BERT.
        from transformers import TFBertModel
        lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """tf""" )
    def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
        # Shared checker for infer_shapes output.
        lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
        lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        # NOTE(review): annotated assignment to comma-separated targets —
        # SyntaxError; all four results collapse into `lowerCamelCase`.
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
        # Assert all variables are present
        self.assertEqual(len(__a ) , len(__a ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , __a )
        self.assertSequenceEqual(variable_names[3:] , __a )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
    def a__ ( self: List[Any] )-> int:
        # ensure_valid_input: ordering and filtering of generated args.
        lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__a ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(__a ) , set(__a ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__a ) , 1 )
        self.assertEqual(len(__a ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )
    def a__ ( self: Tuple )-> Tuple:
        # generate_identified_filename appends the suffix before the extension.
        # NOTE(review): `generated` below is undefined — the result went to
        # `lowerCamelCase`.
        lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase :Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def snake_case ( height: int , width: int , scale_factor: int = 8 ) -> tuple:
    """Round ``height``/``width`` up to the nearest multiple of
    ``scale_factor**2`` and divide by ``scale_factor`` — i.e. the spatial size
    the movq decoder needs for its latents.

    NOTE(review): the original declared all three parameters with the same
    name ``UpperCamelCase__`` (a SyntaxError) while the body used
    ``height``/``width``/``scale_factor``, and the return annotation claimed
    ``List[str]`` for an ``(int, int)`` tuple. Both restored.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


# Backward-compatible alias: the pipeline's `__call__` below invokes this
# helper under its upstream name `downscale_height_and_width`.
downscale_height_and_width = snake_case
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: Union[str, Any] , __a: UNetaDConditionModel , __a: DDPMScheduler , __a: VQModel , )-> Optional[Any]:
super().__init__()
self.register_modules(
unet=__a , scheduler=__a , movq=__a , )
lowerCamelCase : Optional[int] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def a__ ( self: Optional[Any] , __a: Optional[int] , __a: Any , __a: List[Any] , __a: str , __a: List[str] , __a: Dict )-> Dict:
if latents is None:
lowerCamelCase : Optional[Any] = randn_tensor(__a , generator=__a , device=__a , dtype=__a )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
lowerCamelCase : int = latents.to(__a )
lowerCamelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def a__ ( self: Optional[int] , __a: Optional[int]=0 )-> List[str]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
lowerCamelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
lowerCamelCase : List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a , __a )
def a__ ( self: List[Any] , __a: List[Any]=0 )-> List[str]:
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
lowerCamelCase : Union[str, Any] = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=__a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowerCamelCase : Any = cpu_offload_with_hook(__a , __a , prev_module_hook=__a )
# We'll offload the last model manually.
lowerCamelCase : Optional[int] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__a , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__a )  # NOTE(review): `__a` presumably a module-level EXAMPLE_DOC_STRING — confirm
def __call__( self: List[str] , __a: Union[torch.FloatTensor, List[torch.FloatTensor]] , __a: Union[torch.FloatTensor, List[torch.FloatTensor]] , __a: int = 512 , __a: int = 512 , __a: int = 100 , __a: float = 4.0 , __a: int = 1 , __a: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __a: Optional[torch.FloatTensor] = None , __a: Optional[str] = "pil" , __a: bool = True , )-> Optional[int]:
    """
    Denoising loop of a Kandinsky-style decoder pipeline: turns (negative)
    image embeddings into images via the unet + scheduler, then decodes
    latents with the MoVQ autoencoder.

    NOTE(review): every local in this body was machine-obfuscated — targets
    were renamed to `lowerCamelCase` while reads kept original names
    (`image_embeds`, `latents`, `noise_pred`, ...), so the code as written
    raises NameError. Left byte-identical; comments only.
    """
    # Resolve the execution device (accounts for accelerate offload hooks).
    lowerCamelCase : int = self._execution_device
    # Classifier-free guidance is active when guidance_scale > 1.
    lowerCamelCase : Optional[Any] = guidance_scale > 1.0
    # Concatenate list-valued embeddings along the batch dimension.
    if isinstance(__a , __a ):
        lowerCamelCase : Optional[Any] = torch.cat(__a , dim=0 )
    lowerCamelCase : Tuple = image_embeds.shape[0] * num_images_per_prompt
    if isinstance(__a , __a ):
        lowerCamelCase : Union[str, Any] = torch.cat(__a , dim=0 )
    if do_classifier_free_guidance:
        # Duplicate embeddings per requested image and stack [negative, positive].
        lowerCamelCase : List[Any] = image_embeds.repeat_interleave(__a , dim=0 )
        lowerCamelCase : Union[str, Any] = negative_image_embeds.repeat_interleave(__a , dim=0 )
        lowerCamelCase : List[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__a )
    self.scheduler.set_timesteps(__a , device=__a )
    lowerCamelCase : List[str] = self.scheduler.timesteps
    lowerCamelCase : Tuple = self.unet.config.in_channels
    # Latent spatial size is the requested H/W scaled down by the MoVQ factor.
    lowerCamelCase : Dict = downscale_height_and_width(__a , __a , self.movq_scale_factor )
    # create initial latent
    lowerCamelCase : Tuple = self.prepare_latents(
        (batch_size, num_channels_latents, height, width) , image_embeds.dtype , __a , __a , __a , self.scheduler , )
    for i, t in enumerate(self.progress_bar(__a ) ):
        # expand the latents if we are doing classifier free guidance
        lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
        lowerCamelCase : Tuple = {"""image_embeds""": image_embeds}
        lowerCamelCase : Optional[int] = self.unet(
            sample=__a , timestep=__a , encoder_hidden_states=__a , added_cond_kwargs=__a , return_dict=__a , )[0]
        if do_classifier_free_guidance:
            # Split predicted noise / variance, combine uncond+text noise with the
            # guidance scale, then re-attach the text-branch variance.
            lowerCamelCase : Optional[Any] = noise_pred.split(latents.shape[1] , dim=1 )
            lowerCamelCase : List[str] = noise_pred.chunk(2 )
            lowerCamelCase : Any = variance_pred.chunk(2 )
            lowerCamelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            lowerCamelCase : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
        if not (
            hasattr(self.scheduler.config , """variance_type""" )
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            # Scheduler does not consume a learned variance — drop it.
            lowerCamelCase : Optional[int] = noise_pred.split(latents.shape[1] , dim=1 )
        # compute the previous noisy sample x_t -> x_t-1
        lowerCamelCase : Any = self.scheduler.step(
            __a , __a , __a , generator=__a , )[0]
    # post-processing
    lowerCamelCase : str = self.movq.decode(__a , force_not_quantize=__a )["""sample"""]
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
    if output_type in ["np", "pil"]:
        # Map decoder output from [-1, 1] to [0, 1] and move to NHWC numpy.
        lowerCamelCase : str = image * 0.5 + 0.5
        lowerCamelCase : List[str] = image.clamp(0 , 1 )
        lowerCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            lowerCamelCase : Dict = self.numpy_to_pil(__a )
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=__a )
| 700
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__(unittest.TestCase):
    """
    Unit tests for `knapsack.greedy_knapsack.calc_profit`.

    NOTE(review): the obfuscated dump named every test `a__` (so unittest
    discovered none and later defs shadowed earlier ones) and replaced the
    expected exception class with an undefined `__a`; restored discoverable
    `test_*` names, `ValueError`, and concrete locals. Dropped un-imported
    typing annotations that raised NameError at definition time.
    """

    def test_sorted(self):
        """Greedy selection over these items at capacity 100 yields 210."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # NOTE(review): without a callable argument assertRaisesRegex only
        # returns a context manager and asserts nothing; shape kept as in the
        # original source.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same.")
# Allow running this test module directly with `python thisfile.py`.
if __name__ == "__main__":
    unittest.main()
| 42
| 0
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class A__(ProcessorMixin):
    """
    TVLT processor: wraps a TVLT image processor and a TVLT feature
    extractor into a single processor object.

    NOTE(review): the obfuscated dump assigned all three ProcessorMixin class
    attributes to the same name `snake_case__` (only the last survived) and
    its base class `__lowercase` was undefined; restored the attribute names
    ProcessorMixin actually reads and the `ProcessorMixin` base imported at
    the top of this file.
    """

    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        # Keep direct handles so __call__ dispatches without mixin lookups.
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None,
                 mask_audio=False, mask_pixel=False, *args, **kwargs):
        """
        Forward `images`/`images_mixed` to the image processor and `audio` to
        the feature extractor, merging their outputs into one dict.

        Raises:
            ValueError: if neither `images` nor `audio` is provided.
        """
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs)

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        """Union of the wrapped processors' model input names, order-preserving."""
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy import machinery for the OwlViT subpackage: maps each submodule to the
# public names it provides, extended with optional vision/torch components.
# NOTE(review): the obfuscated dump assigned this dict (and the optional-name
# lists) to throwaway `__lowerCamelCase` names while the bottom of the file
# read `_import_structure`, and never installed the `_LazyModule` into
# `sys.modules`; restored the standard transformers lazy-init pattern.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def snake_case ( UpperCamelCase__ : Any ) -> Optional[Any]:
    # Factory for the `env` subcommand: the parsed-args argument is ignored;
    # it only constructs the command object (wired via set_defaults(func=...)).
    return EnvironmentCommand()
class A__(BaseDiffusersCLICommand):
    """
    `diffusers-cli env`: collect and print environment/version information
    for bug reports.

    NOTE(review): in the obfuscated dump the base class `__lowercase` was
    undefined, both public methods were named `a__` (the second shadowed the
    first), and `self.format_dict` was called while the static helper was
    also named `a__`; restored the base imported above and distinct,
    coherent method names.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register the `env` subcommand on the CLI argument parser."""
        download_parser = parser.add_parser("env")
        # Dispatch to the module-level factory defined above.
        download_parser.set_defaults(func=snake_case)

    def run(self):
        """Gather versions of diffusers' companion libraries and print them."""
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d: dict) -> str:
        """Render a dict as '- key: value' lines for the issue template."""
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 702
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """
    Test-harness helper: builds FocalNet configs and dummy inputs, and runs
    model/backbone/classification shape checks for the test classes below.

    NOTE(review): local assignment targets in this dump were machine-mangled
    to `lowerCamelCase` while later reads use original names (`parent`,
    `config`, `result`, ...), so the code as written raises NameError at
    runtime. Left byte-identical; comments only.
    """
    def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
        # Record every hyper-parameter on the tester instance.
        lowerCamelCase : Dict = parent
        lowerCamelCase : Optional[Any] = batch_size
        lowerCamelCase : Union[str, Any] = image_size
        lowerCamelCase : Optional[int] = patch_size
        lowerCamelCase : Any = num_channels
        lowerCamelCase : Any = embed_dim
        lowerCamelCase : Dict = hidden_sizes
        lowerCamelCase : List[Any] = depths
        lowerCamelCase : Tuple = num_heads
        lowerCamelCase : List[Any] = window_size
        lowerCamelCase : str = mlp_ratio
        lowerCamelCase : str = qkv_bias
        lowerCamelCase : str = hidden_dropout_prob
        lowerCamelCase : Dict = attention_probs_dropout_prob
        lowerCamelCase : Tuple = drop_path_rate
        lowerCamelCase : Dict = hidden_act
        lowerCamelCase : Tuple = use_absolute_embeddings
        lowerCamelCase : List[str] = patch_norm
        lowerCamelCase : List[str] = layer_norm_eps
        lowerCamelCase : str = initializer_range
        lowerCamelCase : Tuple = is_training
        lowerCamelCase : int = scope
        lowerCamelCase : Union[str, Any] = use_labels
        lowerCamelCase : List[str] = type_sequence_label_size
        lowerCamelCase : str = encoder_stride
        lowerCamelCase : List[str] = out_features
        lowerCamelCase : Optional[int] = out_indices
    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        # Build a random pixel batch (+ optional labels) and a matching config.
        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase : str = None
        if self.use_labels:
            lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase : str = self.get_config()
        return config, pixel_values, labels
    def a__ ( self: List[Any] )-> Optional[int]:
        # NOTE(review): `path_norm=` below looks like an upstream typo for
        # `patch_norm` — confirm against FocalNetConfig's signature.
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
        # Base model: check last_hidden_state shape after patch merging.
        lowerCamelCase : Tuple = FocalNetModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Tuple = model(__a )
        lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
        # Backbone: verify feature-map shapes/channels for explicit and default out_features.
        lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Optional[Any] = model(__a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        lowerCamelCase : Dict = None
        lowerCamelCase : Dict = FocalNetBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Any = model(__a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
        # Masked-image-modeling head: reconstruction shape for RGB and greyscale.
        lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[str] = model(__a )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowerCamelCase : List[str] = 1
        lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase : Tuple = model(__a )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
        # Classification head: logits shape for RGB and greyscale inputs.
        lowerCamelCase : Optional[Any] = self.type_sequence_label_size
        lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[str] = model(__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowerCamelCase : int = 1
        lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase : Optional[Any] = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def a__ ( self: int )-> Optional[int]:
        # Common-test adapter: (config, inputs_dict) as expected by ModelTesterMixin.
        lowerCamelCase : str = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
        lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """
    Model-level tests for FocalNet (driven by ModelTesterMixin /
    PipelineTesterMixin via the mangled `__lowercase` bases).

    NOTE(review): bases and local assignment targets were machine-obfuscated
    in this dump; code left byte-identical, comments only.
    """

    # All torch model classes under test (backbone last — several checks
    # below deliberately iterate `self.all_model_classes[:-1]`).
    snake_case__ : List[str] =(
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Optional[int] =(
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature toggles consumed by the common test mixin.
    snake_case__ : Tuple =False
    snake_case__ : Dict =False
    snake_case__ : Dict =False
    snake_case__ : Tuple =False
    snake_case__ : Optional[int] =False
    def a__ ( self: Union[str, Any] )-> Optional[int]:
        # setUp: shared tester + config tester (FocalNet has no text modality).
        lowerCamelCase : List[str] = FocalNetModelTester(self )
        lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )
    def a__ ( self: List[str] )-> List[str]:
        # Run the full battery of config serialization/init checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def a__ ( self: List[str] )-> Union[str, Any]:
        # Placeholder hook referenced by the config test above.
        return
    def a__ ( self: Tuple )-> Tuple:
        # Base-model forward shape check.
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    def a__ ( self: List[Any] )-> Dict:
        # Backbone feature-map checks.
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__a )
    def a__ ( self: List[Any] )-> Tuple:
        # Masked-image-modeling head checks.
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a )
    def a__ ( self: List[str] )-> Dict:
        # Classification head checks.
        lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )
    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def a__ ( self: Optional[Any] )-> str:
        pass
    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def a__ ( self: Optional[Any] )-> Dict:
        pass
    def a__ ( self: Optional[Any] )-> Dict:
        # Input/output embedding accessors (backbone excluded via [:-1]).
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : Any = model_class(__a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
    def a__ ( self: Tuple )-> Optional[int]:
        # forward() must accept `pixel_values` as its first argument.
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : int = model_class(__a )
            lowerCamelCase : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase : Any = [*signature.parameters.keys()]
            lowerCamelCase : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )
    def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
        # Shared helper: verify hidden_states / reshaped_hidden_states shapes.
        lowerCamelCase : List[Any] = model_class(__a )
        model.to(__a )
        model.eval()
        with torch.no_grad():
            lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
        lowerCamelCase : List[str] = outputs.hidden_states
        lowerCamelCase : Tuple = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__a ) , __a )
        # FocalNet has a different seq_length
        lowerCamelCase : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(__a ) , __a )
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
        lowerCamelCase : Tuple = (
            reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def a__ ( self: Any )-> Any:
        # Hidden-state output, both via kwarg and via config flag.
        lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : Union[str, Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : List[str] = True
            self.check_hidden_states_output(__a , __a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : List[Any] = True
            self.check_hidden_states_output(__a , __a , __a , __a )
    def a__ ( self: str )-> Union[str, Any]:
        # Same check with inputs padded up to a multiple of the patch size.
        lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : List[str] = 3
        lowerCamelCase : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCamelCase : Optional[int] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : str = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : Union[str, Any] = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
    @slow
    def a__ ( self: Optional[int] )-> List[Any]:
        # Smoke-test loading a pretrained checkpoint from the hub.
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
    def a__ ( self: str )-> Any:
        # With initializer ranges zeroed, non-embedding params must be 0 or 1.
        lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : int = _config_zero_init(__a )
        for model_class in self.all_model_classes:
            lowerCamelCase : int = model_class(config=__a )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
    """
    Integration test: run a pretrained focalnet-tiny classifier on the COCO
    cats fixture image and pin the expected logits.
    """
    @cached_property
    def a__ ( self: Optional[int] )-> Optional[Any]:
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
    @slow
    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
        lowerCamelCase : Any = self.default_image_processor
        lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
        # forward pass
        with torch.no_grad():
            lowerCamelCase : Any = model(**__a )
        # verify the logits
        lowerCamelCase : Tuple = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , __a )
        lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        # Expected ImageNet class id 281 ("tabby cat").
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
    """
    Backbone-specific tests driven by BackboneTesterMixin (the mangled
    `__lowercase` base). NOTE(review): base name was obfuscated in this dump.
    """

    # Backbone class(es), config class, and has_attentions flag for the mixin.
    snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
    snake_case__ : Optional[int] =FocalNetConfig
    snake_case__ : str =False
    def a__ ( self: Union[str, Any] )-> Tuple:
        # setUp: instantiate the shared model tester used by the mixin's checks.
        lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 0
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__(ProcessorMixin):
    """
    ViLT processor: wraps a ViLT image processor and a BERT(-fast) tokenizer
    into a single processor.

    NOTE(review): the obfuscated dump duplicated `__a` parameter names
    (a SyntaxError), assigned all three ProcessorMixin class attributes to
    the same `snake_case__` name, and left the base class `__lowercase`
    undefined; restored the upstream structure with the `ProcessorMixin`
    base imported at the top of this file.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """
        Tokenize `text` and process `images`, returning one BatchEncoding that
        carries both the tokenizer outputs and pixel_values/pixel_mask.
        """
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Union of tokenizer and image-processor input names, order-preserving."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 703
|
"""simple docstring"""
import os
def snake_case ( ) -> Optional[Any]:
with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f:
lowerCamelCase : int = [] # noqa: E741
for _ in range(20 ):
l.append([int(UpperCamelCase__ ) for x in f.readline().split()] )
lowerCamelCase : Union[str, Any] = 0
# right
for i in range(20 ):
for j in range(17 ):
lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
lowerCamelCase : Tuple = temp
# down
for i in range(17 ):
for j in range(20 ):
lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
lowerCamelCase : Optional[Any] = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
lowerCamelCase : List[str] = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
lowerCamelCase : List[Any] = temp
return maximum
if __name__ == "__main__":
    # NOTE(review): this module defines `snake_case`, not `solution`; calling
    # the name that actually exists so the script entry point works.
    print(snake_case())
| 42
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A__(DiffusionPipeline):
    """
    Unconditional image generation with the score-based generative model and
    the variance-exploding (VE) SDE predictor-corrector sampler.

    NOTE(review): the obfuscated dump left the base class `__lowercase` and
    every loop local (`img_size`, `shape`, `sample`, `sigma_t`, `output`, ...)
    undefined; restored the `DiffusionPipeline` base imported above and the
    evident local names.
    """

    # Components registered via `register_modules` in __init__.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """
        Sample `batch_size` images by integrating the reverse VE-SDE with
        `num_inference_steps` predictor steps, each followed by the
        scheduler's configured number of Langevin corrector steps.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step (Langevin dynamics at the current noise level)
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step (reverse-SDE predictor)
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the denoised mean for the final image, mapped into [0, 1] NHWC.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
| 704
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for StableDiffusionAttendAndExcitePipeline using tiny models.

    NOTE(review): restored from a mangled original where every class attribute
    was named ``snake_case__`` and every method ``a__`` (each shadowing the
    previous one, so the mixins and unittest never saw them).
    """

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    # Attend-and-excite adds `token_indices` on top of the standard text-to-image args.
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Attend-and-excite is sensitive to kernel non-determinism.
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        """Build a tiny UNet/scheduler/VAE/CLIP stack so the pipeline runs fast."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-pinned generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against the full SD v1-4 checkpoint."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        # Release GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        # NOTE(review): the mangled original used `torch.floataa` (no such
        # attribute) — restored to torch.float16 per the fp16 test name.
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 42
| 0
|
"""simple docstring"""
def move_tower(height: int, from_pole: str, to_pole: str, with_pole: str) -> None:
    """Solve Towers of Hanoi recursively, printing one line per disk move.

    NOTE(review): the mangled original defined all three functions under the
    same name ``snake_case`` while their bodies called ``move_tower`` /
    ``move_disk`` — a guaranteed NameError. Restored the canonical names and
    the recursive argument order.
    """
    if height >= 1:
        # Move the top height-1 disks out of the way, move the largest disk,
        # then move the smaller stack on top of it.
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(from_pole: str, to_pole: str) -> None:
    """Report a single disk move."""
    print("moving disk from", from_pole, "to", to_pole)


def main() -> None:
    """Read the tower height from stdin and print the full solution."""
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
| 705
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds tiny ESM configs/inputs and checks model outputs for unit tests.

    NOTE(review): restored the class name to ``EsmModelTester`` — the test
    class below instantiates it by that name — and the duplicated ``__a``
    parameters (a SyntaxError in the mangled original) to their canonical
    names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids, masks and labels plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        # Run with and without an attention mask; both must produce full-size outputs.
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Hooks the ESM model family into the common and pipeline test suites.

    NOTE(review): restored the mixin bases (the original inherited from an
    undefined ``__lowercase``) and the clobbered ``snake_case__`` attribute
    names the mixins actually read.
    """

    test_mismatched_shapes = False
    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padding tokens keep padding_idx; real tokens are offset past it."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        # NOTE(review): method name inferred from the skip message and the
        # common-test suite — confirm against the upstream ESM test file.
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    """Slow integration checks against the pretrained facebook/esm2_t6_8M_UR50D checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 42
| 0
|
"""simple docstring"""
def apply_table(inp, table):
    """Permute the bit-string ``inp`` according to the 1-indexed ``table``."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift of a bit-string by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """S-box lookup: row = outer bits of ``data``, column = middle bits."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


# S-DES tables. Defined at module level (instead of inside the __main__ guard)
# so `function` and the helpers below are importable and testable; values are
# the standard simplified-DES permutation tables and S-boxes.
p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
p4_table = [2, 4, 3, 1]
IP = [2, 6, 3, 1, 4, 8, 5, 7]
IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
expansion = [4, 1, 2, 3, 2, 3, 4, 1]
s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]


def function(expansion, s0, s1, key, message):
    """One S-DES Feistel round: mixes the left half with f(key, right half)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    # S-box outputs are 1-2 bits; left-pad to exactly 2 bits each.
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


def generate_keys(key):
    """Derive the two 8-bit round keys from the 10-bit master key."""
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    # Two further shifts of each half produce the second round key.
    left = left_shift(left_shift(left))
    right = left_shift(left_shift(right))
    key2 = apply_table(left + right, p8_table)
    return key1, key2


def encrypt(key, message):
    """Encrypt an 8-bit message with a 10-bit key (round order: key1, key2)."""
    key1, key2 = generate_keys(key)
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]  # swap halves between the two rounds
    temp = function(expansion, s0, s1, key2, temp)
    return apply_table(temp, IP_inv)


def decrypt(key, cipher_text):
    """Decrypt: identical structure to encrypt with the round keys reversed."""
    key1, key2 = generate_keys(key)
    temp = apply_table(cipher_text, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    return apply_table(temp, IP_inv)


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    CT = encrypt(key, message)
    print("Cipher text is:", CT)

    PT = decrypt(key, CT)
    print("Plain text after decypting is:", PT)
| 706
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test-suite hookup for ALBERT (slow + fast tokenizers).

    NOTE(review): restored from a mangled original in which all methods were
    named ``a__`` (shadowing each other) and the vocab path variable was
    unresolved; the fixture path is computed inline so this class is
    self-contained.
    """

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"))
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30_000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"), keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(get_tests_dir("fixtures/spiece.model"))

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="albert-base-v2",
            revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e",
        )
| 42
| 0
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
# Directories for the YOLO-format dataset; empty by default — fill in before
# running as a script. (All four were assigned to the same mangled name
# `__lowerCamelCase` in the original, clobbering each other.)
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every image in the dataset and write mirrored images + labels.

    NOTE(review): `cva` is the module imported at the top of this file —
    presumably OpenCV (`cv2`), renamed by the mangling; confirm the import.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and parsed YOLO boxes for every non-empty label file.

    Returns:
        (img_paths, labels) where labels[i] is a list of
        [class_id, x_center, y_center, width, height] rows for img_paths[i].
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue  # skip images without any annotation
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its box coordinates accordingly.

    flip_type 1 flips horizontally (x_center -> 1 - x_center);
    flip_type 0 flips vertically (y_center -> 1 - y_center).
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length ``number_char``."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 707
|
"""simple docstring"""
# Sample graphs used in documentation/tests: adjacency lists keyed by vertex.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Post-order DFS from ``vert``; returns vertices in finish order.

    Mutates ``visited`` in place (shared across the outer loop in
    strongly_connected_components).
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """DFS on the reversed graph; returns the component reachable from ``vert``."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS finish order, then DFS on the reversed graph.

    Assumes vertices are 0..len(graph)-1.
    """
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    # Build the transpose graph.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    # Process vertices in decreasing finish time; each unvisited start yields one SCC.
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 42
| 0
|
"""simple docstring"""
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    """Count ways to write ``needed_sum`` as a sum of distinct ``power``-th powers.

    Explores bases ``current_number, current_number + 1, ...`` recursively,
    either including or skipping each base exactly once (so powers are
    distinct). Returns ``(current_sum, solutions_count)``.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def lowerCAmelCase(needed_sum: int, power: int) -> int:
    """Return the number of ways ``needed_sum`` can be expressed as a sum of
    distinct natural-number ``power``-th powers.

    Raises:
        ValueError: if ``needed_sum`` is outside ``[1, 1000]`` or ``power``
            is outside ``[2, 10]``.
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 708
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
__lowerCamelCase :str = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config.json URL.
# NOTE(review): originally the pretrained-config archive map constant;
# the mangled name hides that — restore a descriptive name.
__lowerCamelCase :Any = {
    'huggingface/time-series-transformer-tourism-monthly': (
        'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__(PretrainedConfig):
    """Configuration for a Time Series Transformer model.

    Holds the time-series-specific settings (prediction/context lengths,
    distribution head, lag features, counts of static/dynamic covariates)
    plus the encoder-decoder transformer hyper-parameters. Instantiating
    with the defaults yields a small encoder-decoder configuration.
    """

    model_type = "time_series_transformer"
    # Translate the generic PretrainedConfig attribute names onto this
    # model's own field names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        # Default the conditioning window to the prediction horizon.
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            # Heuristic embedding size per categorical feature, capped at 50.
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        # One lagged copy of the target per lag, plus the auxiliary features.
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Width of the per-time-step auxiliary feature vector:
        # static-categorical embeddings + dynamic/static real features +
        # time features + the two scaling statistics.
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 709
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10  # digit base / number of buckets per pass


def snake_case(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers in place with LSD radix sort; return the list.

    NOTE(review): negative values are not supported — digit extraction via
    ``int((i / placement) % RADIX)`` assumes ``i >= 0``.
    """
    if not list_of_ints:  # max() below would raise on an empty list
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 42
| 0
|
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class A__(unittest.TestCase):
    """Integration test for the Flax XLM-RoBERTa base checkpoint."""

    @slow
    def test_flax_xlm_roberta_base(self):
        """Check last_hidden_state shape and its last-dim slice against
        reference values (downloads the checkpoint, hence @slow)."""
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 710
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy ``weight`` (and optionally ``bias``) into ``torch_layer``'s
    parameters, asserting the shapes match first."""
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH self-attention weights (query_key, value, output dense)
    into the corresponding torch Reformer attention layer.

    ``weights`` layout per the original indexing: [query_key, value, dense].
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local self-attention weights (query, key, value, output dense)
    into the corresponding torch Reformer attention layer.

    ``weights`` layout per the original indexing: [query, key, value, dense].
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block's weights into a torch Reformer block.

    Layout (per the original indexing): ``weights[0]`` holds the attention
    sub-block (layer norm then attention weights), ``weights[2]`` the
    feed-forward sub-block.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        # 3 tensors (query_key, value, dense) -> LSH attention.
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        # 4 tensors (query, key, value, dense) -> local attention.
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a full trax Reformer checkpoint into a torch
    ReformerModelWithLMHead, block by block."""
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # Axial position embeddings arrive as a tuple of per-axis weights.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # Each torch block consumes 4 consecutive trax weight entries.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from ``config_file``, load the trax
    weights pickled at ``trax_model_pkl_path``, and save the resulting
    state dict to ``pytorch_dump_path``."""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        # NOTE(review): pickle.load on an untrusted checkpoint can execute
        # arbitrary code — only run this on checkpoints you trust.
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point for the trax -> PyTorch conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
| 0
|
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class A__:
    """Mixin of save/load round-trip tests for feature extractors.

    Subclasses must set ``feature_extraction_class``, provide
    ``feat_extract_dict``, and mix in ``unittest.TestCase`` for the
    assertion methods used below.
    """

    # Concrete test classes override this with the class under test.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """Serialized JSON string must round-trip every init kwarg."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """to_json_file / from_json_file must round-trip the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained / from_pretrained must round-trip the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The feature extractor must be constructible with no arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 711
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    """Tiny 3 -> 4 -> 5 network used as the fixture for the hook tests."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    """Hook that adds 1 to the first positional input before forward."""

    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    """Hook that adds 1 to the module output after forward."""

    def post_forward(self, module, output):
        return output + 1


class HooksModelTester(unittest.TestCase):
    """Tests for attaching/removing hooks and device-alignment hooks."""

    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        # Appending wraps both hooks in a SequentialHook.
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        # With no_grad set, the hooked forward runs under torch.no_grad().
        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 42
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for OpenAI GPT (slow and Rust/fast implementations)."""

    # Attributes read by TokenizerTesterMixin; the mangled source reassigned a
    # single name four times, so the mixin never saw them.
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        """Write a tiny BPE vocab/merges pair into the temp dir for the tests."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        # Same text in and out: the vocab round-trips "lower newer".
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        """A tokenizer without a padding token must refuse `padding="max_length"`."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # Overridden as a no-op: this tokenizer has no pad token, so the shared
        # padding test from the mixin does not apply.
        pass
@require_ftfy
@require_spacy
@require_tokenizers
# NOTE(review): the base name "__lowercase" is not defined at module level in this
# file — presumably it stands for another test class or mixin; confirm before use.
class A__ ( __lowercase):
    """Placeholder for ftfy/spacy-dependent tokenizer tests (none defined yet)."""
    pass
| 712
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase :Optional[Any] = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Union[str, Any] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase :Optional[Any] = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[Any] = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
__lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 713
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__:
    """Builds small ConvNext configs and inputs for the model tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        # Fixed: the constructor previously assigned all arguments to a single
        # throwaway local, so every later `self.<attr>` read raised AttributeError.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a tiny random batch."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ConvNext model tests; overrides common tests that assume text inputs."""

    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        # Fixed: tester/config-tester were previously bound to throwaway locals,
        # so self.model_tester / self.config_tester never existed.
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # Common text-config property checks do not apply to this vision config.
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests below."""
    # Fixed: the opened image was bound to a throwaway local while the function
    # returned the undefined name `image`; the call site also expects the name
    # `prepare_img`, which this definition restores.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A__(unittest.TestCase):
    """Slow integration test running a real ConvNext checkpoint on a fixture image."""

    @cached_property
    def default_image_processor(self):
        # Named `default_image_processor` because the test body reads it under
        # that name; returns None when vision extras are unavailable.
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class A__(unittest.TestCase, BackboneTesterMixin):
    """Backbone-specific checks (stage outputs, channels) for ConvNextBackbone."""

    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig
    has_attentions = False

    def setUp(self):
        # Fixed: the tester was previously bound to a throwaway local, so the
        # mixin's `self.model_tester` reads failed.
        self.model_tester = ConvNextModelTester(self)
| 42
| 0
|
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def gabor_filter_kernel(
    ksize: int, sigma: int, theta: int, lambd: int, gamma: int, psi: int
) -> np.ndarray:
    """
    Build a ``ksize`` x ``ksize`` Gabor filter kernel.

    :param ksize: kernel size; even values are bumped to the next odd number
    :param sigma: standard deviation of the Gaussian envelope
    :param theta: orientation of the filter, in degrees
    :param lambd: wavelength of the sinusoidal carrier
    :param gamma: spatial aspect ratio
    :param psi: phase offset of the carrier
    """
    # prepare kernel
    # the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)

    # each value
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel: Gaussian envelope modulated by a cosine carrier
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)
            ) * np.cos(2 * np.pi * _x / lambd + psi)

    return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__lowerCamelCase :int = imread('../image_data/lena.jpg')
# turn image in gray scale value
__lowerCamelCase :int = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
__lowerCamelCase :str = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__lowerCamelCase :int = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__lowerCamelCase :int = out / out.max() * 255
__lowerCamelCase :Tuple = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0)
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
# Fixed: the logger and this map previously shared one overwritten name, and the
# "openqa" URL used "/aresolve/" instead of the standard "/resolve/" path.
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/realm-cc-news-pretrained-embedder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-encoder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-scorer': (
        'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
    ),
    'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
    'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
    'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
    'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class A__(PretrainedConfig):
    """Configuration class for REALM models (embedder/encoder/scorer/reader/retriever)."""

    # Read by PretrainedConfig machinery; was previously lost in a mangled attribute name.
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3_072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13_353_718,
        searcher_beam_size=5_000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Fixed: every argument was previously assigned to one throwaway local,
        # so none of these attributes ever existed on the config instance.
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 42
| 0
|
"""simple docstring"""
from math import isclose, sqrt
def next_point(
    point_x: float, point_y: float, incoming_gradient: float
) -> tuple[float, float, float]:
    """
    Given a point on the ellipse 4x^2 + y^2 = 100 and the gradient of the
    incoming ray, return the next impact point and the outgoing gradient.
    """
    # gradient of the normal at the point of impact
    normal_gradient = point_y / 4 / point_x
    # sin(2*theta) and cos(2*theta) of the normal's angle, used to reflect the ray
    s2 = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
    c2 = (1 - normal_gradient * normal_gradient) / (
        1 + normal_gradient * normal_gradient
    )
    outgoing_gradient = (s2 - c2 * incoming_gradient) / (c2 + s2 * incoming_gradient)

    # to find the next point, solve the simultaeneous equations:
    # y^2 + 4x^2 = 100
    # y - b = m * (x - a)
    # ==> A x^2 + B x + C = 0
    quadratic_term = outgoing_gradient**2 + 4
    linear_term = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
    constant_term = (point_y - outgoing_gradient * point_x) ** 2 - 100

    x_minus = (
        -linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)
    x_plus = (
        -linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term)
    ) / (2 * quadratic_term)

    # two solutions, one of which is our input point: keep the other one
    next_x = x_minus if isclose(x_plus, point_x) else x_plus
    next_y = point_y + outgoing_gradient * (next_x - point_x)

    return next_x, next_y, outgoing_gradient


def solution(first_x_coord: float = 1.4, first_y_coord: float = -9.6) -> int:
    """
    Project Euler 144: count how many times a laser beam reflects inside the
    white cell 4x^2 + y^2 = 100 before escaping through the top gap, given the
    coordinates of its first impact.

    >>> solution(1.4, -9.6)
    354
    """
    num_reflections: int = 0
    point_x: float = first_x_coord
    point_y: float = first_y_coord
    # the beam enters at (0.0, 10.1)
    gradient: float = (10.1 - point_y) / (0.0 - point_x)

    # the gap at the top is -0.01 <= x <= 0.01 with y > 0
    while not (-0.01 <= point_x <= 0.01 and point_y > 0):
        point_x, point_y, gradient = next_point(point_x, point_y, gradient)
        num_reflections += 1

    return num_reflections


if __name__ == "__main__":
    print(f"{solution() = }")
| 715
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL.
# Fixed: the logger and this map previously shared one overwritten name.
GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__(PretrainedConfig):
    """Configuration class for GLPN (monocular depth estimation) models."""

    # Read by PretrainedConfig machinery; was previously lost in a mangled attribute name.
    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # Fixed: every argument was previously assigned to one throwaway local,
        # so none of these attributes ever existed on the config instance.
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 42
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class A__(SchedulerCommonTest):
    """Scheduler tests specialised to DDPMScheduler."""

    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Default DDPM config dict, overridable through **kwargs."""
        # Fixed: the dict was previously bound to a throwaway local while the
        # undefined name `config` was updated and returned.
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]  # 51 after 50 -> not descending

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 716
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """
    Return u * (u - 1) * ... * (u - p + 1), the product term used by
    Newton's forward-difference interpolation formula.
    """
    # Fixed: the original definition had duplicate parameter names (a
    # SyntaxError) while the body read `u`; restored the intended names.
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Read sample points from stdin and print the Newton forward-interpolated value."""
    # Fixed: the function was renamed so the __main__ guard's `main()` call
    # resolves, and locals (n, x, y, u, summ) that were collapsed into one
    # throwaway name are restored.
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    # pre-fill the n x n difference table with zeros
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f'the value at {value} is {summ}')


if __name__ == "__main__":
    main()
| 42
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
    """Tests for `GPTSanJapaneseTokenizer`: vocab/emoji round-trips, special
    tokens, bag-of-token expansion, prefix-text encoding and batch padding."""

    # NOTE(review): class attributes were all obfuscated to the same name
    # `snake_case__` and method locals to `lowerCamelCase`, while later
    # statements read the pre-obfuscation names (`vocab_tokens`, `tokenizer`,
    # `prefix_text`, ...).  As written those reads reference unbound names —
    # confirm behaviour against the original test module.  Indentation below
    # is reconstructed; the original formatting was lost.
    snake_case__ : List[Any] =GPTSanJapaneseTokenizer
    snake_case__ : Optional[Any] =False
    snake_case__ : Optional[int] ={'do_clean_text': False, 'add_prefix_space': False}

    def a__ ( self: List[Any] )-> int:
        """Write a tiny vocab file and emoji map into `tmpdirname`."""
        super().setUp()
        # fmt: off
        lowerCamelCase : Dict = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        lowerCamelCase : Tuple = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}} # 😀
        lowerCamelCase : Tuple = {"""unk_token""": """<unk>"""}
        lowerCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(__a ) )

    def a__ ( self: List[str] , **__a: Any )-> str:
        # Build a tokenizer from the fixture directory, merging special tokens.
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **__a )

    def a__ ( self: List[Any] , __a: List[str] )-> int:
        # Input/expected-output pair; 㔺 normalises to 世 on decode.
        lowerCamelCase : Tuple = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        lowerCamelCase : str = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text

    def a__ ( self: Optional[int] , __a: Any )-> str:
        # Encode the fixture text and decode it back for round-trip checks.
        lowerCamelCase : Optional[int] = self.get_input_output_texts(__a )
        lowerCamelCase : Optional[Any] = tokenizer.encode(__a , add_special_tokens=__a )
        lowerCamelCase : Tuple = tokenizer.decode(__a , clean_up_tokenization_spaces=__a )
        return text, ids

    def a__ ( self: List[str] )-> Tuple:
        pass  # TODO add if relevant

    def a__ ( self: Dict )-> Any:
        pass  # TODO add if relevant

    def a__ ( self: int )-> Optional[Any]:
        pass  # TODO add if relevant

    def a__ ( self: Optional[Any] )-> str:
        """Tokenization and token<->id conversion with/without special tokens."""
        lowerCamelCase : Any = self.get_tokenizer()
        # Testing tokenization
        lowerCamelCase : Dict = """こんにちは、世界。 こんばんは、㔺界。"""
        lowerCamelCase : List[str] = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        lowerCamelCase : str = tokenizer.tokenize(__a )
        self.assertListEqual(__a , __a )
        # Testing conversion to ids without special tokens
        lowerCamelCase : Any = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(__a , __a )
        # Testing conversion to ids with special tokens
        lowerCamelCase : Dict = tokens + [tokenizer.unk_token]
        lowerCamelCase : Dict = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        lowerCamelCase : int = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(__a , __a )

    def a__ ( self: Dict )-> Any:
        """<|bagoftoken|> expands to repeated copies of the following token."""
        lowerCamelCase : Any = self.get_tokenizer()
        # Testing tokenization
        lowerCamelCase : List[Any] = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        lowerCamelCase : Union[str, Any] = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        lowerCamelCase : List[Any] = tokenizer.encode(__a )
        lowerCamelCase : int = tokenizer.decode(__a )
        self.assertEqual(__a , __a )

    @slow
    def a__ ( self: int )-> List[Any]:
        """Prefix text passed inline vs. via `prefix_text` decodes identically."""
        lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        lowerCamelCase : Tuple = """こんにちは、世界。"""
        lowerCamelCase : List[str] = """こんばんは、㔺界。😀"""
        lowerCamelCase : Optional[Any] = """こんにちは、世界。こんばんは、世界。😀"""
        lowerCamelCase : Optional[Any] = tokenizer.encode(prefix_text + input_text )
        lowerCamelCase : Union[str, Any] = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        lowerCamelCase : int = tokenizer.encode(__a , prefix_text=__a )
        lowerCamelCase : str = tokenizer.decode(__a )
        lowerCamelCase : Any = tokenizer.decode(__a )
        lowerCamelCase : str = tokenizer.decode(__a )
        self.assertEqual(__a , __a )
        self.assertEqual(__a , __a )
        self.assertEqual(__a , __a )

    @slow
    def a__ ( self: Tuple )-> Any:
        """token_type_ids mark the prefix segment in each encoding variant."""
        lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        lowerCamelCase : Dict = """こんにちは、世界。"""
        lowerCamelCase : Any = """こんばんは、㔺界。😀"""
        lowerCamelCase : List[str] = len(tokenizer.encode(__a ) ) - 2
        lowerCamelCase : int = len(tokenizer.encode(__a ) ) - 2
        lowerCamelCase : str = [1] + [0] * (len_prefix + len_text + 1)
        lowerCamelCase : int = [1] * (len_prefix + len_text + 1) + [0]
        lowerCamelCase : Dict = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        lowerCamelCase : Optional[Any] = tokenizer(prefix_text + input_text ).token_type_ids
        lowerCamelCase : Tuple = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        lowerCamelCase : Optional[int] = tokenizer(__a , prefix_text=__a ).token_type_ids
        self.assertListEqual(__a , __a )
        self.assertListEqual(__a , __a )
        self.assertListEqual(__a , __a )

    @slow
    def a__ ( self: List[str] )-> Tuple:
        """The SEG token position depends on how the prefix is supplied."""
        lowerCamelCase : Dict = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        lowerCamelCase : Union[str, Any] = tokenizer.encode("""あンいワ""" )
        lowerCamelCase : int = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        lowerCamelCase : int = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
        self.assertEqual(tokenizer.decode(__a ) , tokenizer.decode(__a ) )
        self.assertNotEqual(__a , __a )
        self.assertNotEqual(__a , __a )
        self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token

    @slow
    def a__ ( self: Optional[int] )-> Tuple:
        """Batch encoding pads ids/token_type_ids/attention_mask consistently."""
        lowerCamelCase : int = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        lowerCamelCase : Optional[int] = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        lowerCamelCase : int = tokenizer(__a , padding=__a )
        lowerCamelCase : List[str] = tokenizer.batch_encode_plus(__a , padding=__a )
        # fmt: off
        lowerCamelCase : Optional[int] = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        lowerCamelCase : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        lowerCamelCase : Dict = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , __a )
        self.assertListEqual(x_token.token_type_ids , __a )
        self.assertListEqual(x_token.attention_mask , __a )
        self.assertListEqual(x_token_a.input_ids , __a )
        self.assertListEqual(x_token_a.token_type_ids , __a )
        self.assertListEqual(x_token_a.attention_mask , __a )

    def a__ ( self: Any )-> Optional[Any]:
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def a__ ( self: Tuple )-> Union[str, Any]:
        # tokenizer has no padding token
        pass
| 717
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffold for the GPT-Sw3 tokenizer module.
#
# Bug fix: the import-structure dict was bound to a throwaway name while
# `_LazyModule` below was handed the (previously undefined) name
# `_import_structure`, which raised NameError at import time.  The lazy module
# is also installed into `sys.modules`, which is what makes lazy loading take
# effect — the imported-but-otherwise-unused `sys` in the original points at
# this intent.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; without it the tokenizer is simply not exported.
    pass
else:
    # NOTE(review): the exported symbol is spelled 'GPTSw3Tokenizer' here but
    # the TYPE_CHECKING branch imports `GPTSwaTokenizer` — confirm which name
    # the tokenizer module actually defines.
    _import_structure["tokenization_gpt_swa"] = ['GPTSw3Tokenizer']

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Bug fix: these module-level constants were all assigned to a throwaway name,
# while the tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` —
# all unbound as written.  Restore the names the rest of the file references.
logger = logging.get_logger(__name__)

# Maps the logical vocab/merges files to their on-disk names.
VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.txt',
    'merges_file': 'bpe.codes',
}

# Download locations of the pretrained vocab/merges files per checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
        'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
    },
    'merges_file': {
        'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
        'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
    },
}

# Maximum input length (in tokens) for each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'vinai/phobert-base': 256,
    'vinai/phobert-large': 256,
}
def snake_case ( word ) -> set:
    """Return the set of adjacent symbol pairs occurring in *word*.

    Used by the BPE merge loop: each element is a ``(prev_char, char)`` tuple
    for every pair of neighbouring symbols.

    Args:
        word: The word to scan — a string or a tuple of symbol strings.  An
            empty input yields an empty set.

    Returns:
        The set of adjacent symbol pairs.
    """
    # Bug fix: the original assigned every local to the throwaway name
    # `lowerCamelCase` while the loop read `pairs`/`prev_char`, and its
    # parameter name did not match the `word` the body reads — all reads hit
    # unbound names.  Also guard the empty input, which previously raised
    # IndexError on `word[0]`.
    if not word:
        return set()
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return set(pairs )
class A__ ( __lowercase):
    """PhoBERT tokenizer: BPE over a plain-text vocab (`vocab.txt`) plus merge
    rules (`bpe.codes`), with RoBERTa-style special-token handling."""

    # NOTE(review): attribute/local assignments in this file were obfuscated to
    # `snake_case__` / `lowerCamelCase`, while later statements read the
    # pre-obfuscation names (`self.encoder`, `merges`, `pairs`, `word`, ...).
    # As written those reads reference unbound names — verify against the
    # original `tokenization_phobert.py`.  Indentation is reconstructed; the
    # original formatting was lost.
    snake_case__ : Dict =VOCAB_FILES_NAMES
    snake_case__ : Dict =PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__( self: Optional[int] , __a: List[str] , __a: Any , __a: Union[str, Any]="<s>" , __a: List[Any]="</s>" , __a: str="</s>" , __a: str="<s>" , __a: str="<unk>" , __a: Dict="<pad>" , __a: List[str]="<mask>" , **__a: str , )-> List[str]:
        """Load the token->id table from the vocab file and the BPE merge ranks
        from the merges file; ids 0-3 are reserved for the special tokens."""
        super().__init__(
            bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , **__a , )
        lowerCamelCase : Tuple = vocab_file
        lowerCamelCase : List[Any] = merges_file
        lowerCamelCase : Tuple = {}
        lowerCamelCase : List[str] = 0
        lowerCamelCase : Union[str, Any] = 1
        lowerCamelCase : Tuple = 2
        lowerCamelCase : Dict = 3
        self.add_from_file(__a )
        # Reverse mapping id -> token.
        lowerCamelCase : List[str] = {v: k for k, v in self.encoder.items()}
        with open(__a , encoding="""utf-8""" ) as merges_handle:
            lowerCamelCase : Optional[int] = merges_handle.read().split("""\n""" )[:-1]
        lowerCamelCase : List[Any] = [tuple(merge.split()[:-1] ) for merge in merges]
        # Merge pair -> rank (lower rank = applied earlier).
        lowerCamelCase : List[Any] = dict(zip(__a , range(len(__a ) ) ) )
        lowerCamelCase : Union[str, Any] = {}

    def a__ ( self: Optional[Any] , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
        """Add special tokens: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase : Optional[Any] = [self.cls_token_id]
        lowerCamelCase : Union[str, Any] = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        if token_ids_a is None:
            return [1] + ([0] * len(__a )) + [1]
        return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]

    def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
        """PhoBERT does not use token types: the mask is all zeros."""
        lowerCamelCase : int = [self.sep_token_id]
        lowerCamelCase : Any = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def a__ ( self: Optional[Any] )-> Optional[int]:
        # Vocabulary size (special tokens included via the encoder table).
        return len(self.encoder )

    def a__ ( self: Any )-> List[str]:
        # Full vocab including tokens added after loading.
        return dict(self.encoder , **self.added_tokens_encoder )

    def a__ ( self: Union[str, Any] , __a: Union[str, Any] )-> int:
        """Apply byte-pair merges to a single token, with memoisation.

        The token is split into symbols (the last one suffixed `</w>`), then
        the lowest-ranked adjacent pair is merged repeatedly until no known
        pair remains; the result is joined with `@@ ` continuation markers.
        """
        if token in self.cache:
            return self.cache[token]
        lowerCamelCase : Tuple = tuple(__a )
        lowerCamelCase : List[Any] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
        lowerCamelCase : str = get_pairs(__a )
        if not pairs:
            return token
        while True:
            # Pick the adjacent pair with the best (lowest) merge rank.
            lowerCamelCase : Union[str, Any] = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCamelCase : int = bigram
            lowerCamelCase : List[str] = []
            lowerCamelCase : Optional[Any] = 0
            while i < len(__a ):
                try:
                    lowerCamelCase : List[str] = word.index(__a , __a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCamelCase : Union[str, Any] = j
                if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCamelCase : List[str] = tuple(__a )
            lowerCamelCase : int = new_word
            if len(__a ) == 1:
                break
            else:
                lowerCamelCase : List[str] = get_pairs(__a )
        lowerCamelCase : Tuple = """@@ """.join(__a )
        # Strip the trailing `@@ ` left over from the final `</w>` marker.
        lowerCamelCase : List[Any] = word[:-4]
        lowerCamelCase : Union[str, Any] = word
        return word

    def a__ ( self: Tuple , __a: int )-> Dict:
        """Tokenize text: whitespace-split, then BPE each chunk."""
        lowerCamelCase : Optional[Any] = []
        lowerCamelCase : Optional[Any] = re.findall(r"""\S+\n?""" , __a )
        for token in words:
            split_tokens.extend(list(self.bpe(__a ).split(""" """ ) ) )
        return split_tokens

    def a__ ( self: List[str] , __a: Optional[Any] )-> Dict:
        # Token -> id, falling back to the <unk> id.
        return self.encoder.get(__a , self.encoder.get(self.unk_token ) )

    def a__ ( self: str , __a: Optional[Any] )-> Optional[Any]:
        # Id -> token, falling back to <unk>.
        return self.decoder.get(__a , self.unk_token )

    def a__ ( self: Any , __a: List[str] )-> int:
        # Join tokens and remove the `@@ ` continuation markers.
        lowerCamelCase : int = """ """.join(__a ).replace("""@@ """ , """""" ).strip()
        return out_string

    def a__ ( self: Dict , __a: str , __a: Optional[str] = None )-> Tuple[str]:
        """Copy the vocab and merges files into `save_directory`."""
        if not os.path.isdir(__a ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCamelCase : int = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase : Optional[int] = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
            copyfile(self.vocab_file , __a )
        if os.path.abspath(self.merges_file ) != os.path.abspath(__a ):
            copyfile(self.merges_file , __a )
        return out_vocab_file, out_merge_file

    def a__ ( self: List[str] , __a: Optional[Any] )-> Union[str, Any]:
        """Populate `self.encoder` from a fairseq-style `<token> <count>` file
        (or an already-open file handle)."""
        if isinstance(__a , __a ):
            try:
                with open(__a , """r""" , encoding="""utf-8""" ) as fd:
                    self.add_from_file(__a )
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' )
            return
        lowerCamelCase : Any = f.readlines()
        for lineTmp in lines:
            lowerCamelCase : Dict = lineTmp.strip()
            # The count is everything after the last space; the token precedes it.
            lowerCamelCase : Union[str, Any] = line.rfind(""" """ )
            if idx == -1:
                raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" )
            lowerCamelCase : Union[str, Any] = line[:idx]
            lowerCamelCase : List[str] = len(self.encoder )
| 718
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
# Enable INFO-level logging for the conversion script and grab a module logger.
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict:
    """Convert one timm LeViT checkpoint to the HF format and sanity-check it.

    Loads the timm model matching `hidden_sizes`/`name`, copies its state dict
    key-by-key into a fresh `LevitForImageClassificationWithTeacher`, asserts
    the logits agree on a random input, and optionally saves the model plus a
    `LevitImageProcessor` under `save_directory / checkpoint_name`.
    """
    # NOTE(review): all five parameters were obfuscated to the same name
    # `UpperCamelCase__` (a SyntaxError as written) and locals to
    # `lowerCamelCase`, while the body reads the pre-obfuscation names
    # (`name`, `hidden_sizes`, `from_model`, `our_model`, `weights`, ...).
    # Indentation is reconstructed; confirm against the original
    # convert_levit_to_pytorch script.
    print(F'Converting {name}...' )
    with torch.no_grad():
        # Select the timm reference model for this configuration.
        if hidden_sizes == 128:
            if name[-1] == "S":
                lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ )
            else:
                lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ )
        if hidden_sizes == 192:
            lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ )
        if hidden_sizes == 256:
            lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ )
        if hidden_sizes == 384:
            lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ )
        from_model.eval()
        lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval()
        lowerCamelCase : Tuple = OrderedDict()
        lowerCamelCase : Optional[Any] = from_model.state_dict()
        lowerCamelCase : str = list(from_model.state_dict().keys() )
        lowerCamelCase : List[Any] = list(our_model.state_dict().keys() )
        print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        # Copy weights positionally: i-th source key -> i-th target key.
        for i in range(len(UpperCamelCase__ ) ):
            lowerCamelCase : str = weights[og_keys[i]]
        our_model.load_state_dict(UpperCamelCase__ )
        # Verify equivalence on a random batch before saving anything.
        lowerCamelCase : int = torch.randn((2, 3, 224, 224) )
        lowerCamelCase : Any = from_model(UpperCamelCase__ )
        lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits
        assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one."
        lowerCamelCase : Dict = name
        print(UpperCamelCase__ )
        if push_to_hub:
            our_model.save_pretrained(save_directory / checkpoint_name )
            lowerCamelCase : Optional[int] = LevitImageProcessor()
            image_processor.save_pretrained(save_directory / checkpoint_name )
            print(F'Pushed {checkpoint_name}' )
def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]:
    """Convert one (or all) LeViT checkpoints into `save_directory`.

    Builds the ImageNet-1k label maps, one config per LeViT variant, and
    dispatches to the single-checkpoint converter above.
    """
    # NOTE(review): obfuscated assignments vs. pre-obfuscation reads
    # (`num_labels`, `idalabel`, `ImageNetPreTrainedConfig`, `model_name`,
    # `names_to_hidden_sizes`, `convert_weight_and_push`, ...) — unbound names
    # as written.  `ImageNetPreTrainedConfig` is presumably the `partial`
    # built a few lines above — TODO confirm against the original script.
    lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json"""
    lowerCamelCase : List[Any] = 1000
    lowerCamelCase : Dict = (1, num_labels)
    lowerCamelCase : List[Any] = """huggingface/label-files"""
    lowerCamelCase : Optional[int] = num_labels
    # Fetch the id->label mapping from the HF hub dataset.
    lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
    lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
    lowerCamelCase : List[Any] = idalabel
    lowerCamelCase : str = {v: k for k, v in idalabel.items()}
    lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ )
    # Hidden-size of the first stage, per variant name.
    lowerCamelCase : Optional[int] = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    # Full architecture config per variant.
    lowerCamelCase : List[Any] = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
    }
    if model_name:
        # Convert only the requested variant.
        convert_weight_and_push(
            names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ )
    else:
        # No name given: convert every known variant.
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry point: parse the target model name / output folder and run the
    # conversion.
    # NOTE(review): assignments below were obfuscated to `__lowerCamelCase`
    # while later lines read `parser`, `args`, `pytorch_dump_folder_path` and
    # call `convert_weights_and_push` — unbound names as written.
    __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    __lowerCamelCase :List[Any] = parser.parse_args()
    __lowerCamelCase :Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 0
|
"""simple docstring"""
def snake_case ( arr: list ) -> list:
    """Return all permutations of *arr* using iterative Heap's algorithm.

    Args:
        arr: The list to permute.  It is permuted in place while the result is
            being built.

    Returns:
        A list of tuples, one per permutation of ``arr`` (``n!`` entries; a
        single ``[()]`` / ``[(x,)]`` for empty or singleton input).
    """
    # Bug fix: the original assigned every local to the throwaway name
    # `lowerCamelCase` while the algorithm read `res`, `c` and `i`, and the CLI
    # below read `user_input`/`arr` and called the undefined `heaps` — restore
    # one consistent set of names.
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []

    def generate(n: int , arr: list ):
        # `c` encodes the loop state of the recursive formulation of Heap's
        # algorithm.
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                # The swap partner alternates with the parity of i (Heap's rule).
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0  # restart the scan after emitting a permutation
            else:
                c[i] = 0
                i += 1

    generate(len(arr ) , arr )
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(snake_case(arr))
| 719
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
    """Scheduler tests for `KDPMaDiscreteScheduler`: iterates config options
    and runs full denoising loops, comparing sum/mean statistics of the output
    against reference values per device."""

    # NOTE(review): assignments in this file were obfuscated (`snake_case__`,
    # `lowerCamelCase`) while later statements read the pre-obfuscation names
    # (`config`, `scheduler`, `sample`, `result_sum`, ...); as written those
    # reads reference unbound names — confirm against the original test module.
    snake_case__ : Tuple =(KDPMaDiscreteScheduler,)
    snake_case__ : Tuple =10

    def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]:
        # Default scheduler config; keyword overrides are merged on top.
        lowerCamelCase : int = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**__a )
        return config

    def a__ ( self: Union[str, Any] )-> Any:
        # Exercise a range of training-horizon lengths.
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=__a )

    def a__ ( self: str )-> int:
        # Exercise several (beta_start, beta_end) pairs.
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
            self.check_over_configs(beta_start=__a , beta_end=__a )

    def a__ ( self: int )-> Union[str, Any]:
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=__a )

    def a__ ( self: List[Any] )-> List[Any]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=__a )

    def a__ ( self: Union[str, Any] )-> int:
        """Full denoising loop with v-prediction; check output statistics."""
        lowerCamelCase : List[str] = self.scheduler_classes[0]
        lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
        lowerCamelCase : List[str] = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCamelCase : Dict = self.dummy_model()
        lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCamelCase : List[Any] = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a )
            lowerCamelCase : Optional[int] = model(__a , __a )
            lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
            lowerCamelCase : Optional[Any] = output.prev_sample
        lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) )
        lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
        # Reference statistics differ slightly between CPU/MPS and CUDA.
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
            assert abs(result_mean.item() - 0.00_02 ) < 1e-3

    def a__ ( self: Any )-> Any:
        """Full denoising loop with the default (epsilon) config."""
        if torch_device == "mps":
            return
        lowerCamelCase : Dict = self.scheduler_classes[0]
        lowerCamelCase : Dict = self.get_scheduler_config()
        lowerCamelCase : int = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps )
        lowerCamelCase : List[Any] = self.dummy_model()
        lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
        lowerCamelCase : Optional[int] = sample.to(__a )
        for i, t in enumerate(scheduler.timesteps ):
            lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a )
            lowerCamelCase : Optional[Any] = model(__a , __a )
            lowerCamelCase : Tuple = scheduler.step(__a , __a , __a )
            lowerCamelCase : str = output.prev_sample
        lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) )
        lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3

    def a__ ( self: Optional[Any] )-> List[Any]:
        """Same loop but with timesteps placed on the target device."""
        if torch_device == "mps":
            return
        lowerCamelCase : Any = self.scheduler_classes[0]
        lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        lowerCamelCase : Optional[Any] = scheduler_class(**__a )
        scheduler.set_timesteps(self.num_inference_steps , device=__a )
        lowerCamelCase : Union[str, Any] = self.dummy_model()
        lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a )
            lowerCamelCase : Optional[int] = model(__a , __a )
            lowerCamelCase : int = scheduler.step(__a , __a , __a )
            lowerCamelCase : int = output.prev_sample
        lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) )
        lowerCamelCase : int = torch.mean(torch.abs(__a ) )
        if str(__a ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25 ) < 1e-2
            assert abs(result_mean.item() - 0.02_66 ) < 1e-3
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10  # base used for digit bucketing (decimal LSD radix sort)


def snake_case ( list_of_ints: list[int] ) -> list[int]:
    """Sort a list of non-negative integers in place with LSD radix sort.

    >>> snake_case([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> snake_case([1, 100, 10])
    [1, 10, 100]

    Args:
        list_of_ints: Non-negative integers to sort (mutated in place).

    Returns:
        The same list, sorted ascending.
    """
    # Bug fix: the module constant and every local were obfuscated to
    # throwaway names while the algorithm read `RADIX`, `placement`,
    # `max_digit`, `buckets`, `tmp` and `a` — all unbound as written.  Also
    # guard the empty list, which previously raised ValueError on `max([])`.
    if not list_of_ints:
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints )
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX )]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX )
            buckets[tmp].append(i )
        # put each buckets' contents back into list_of_ints, in order
        a = 0
        for b in range(RADIX ):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 720
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
# Make all randomness deterministic for the fast pipeline tests below.
enable_full_determinism()


class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Fast pipeline tests for `StableDiffusionXLImgaImgPipeline` built from
    tiny randomly-initialised components (UNet, VAE, two CLIP text encoders)."""

    # NOTE(review): obfuscated assignments (`snake_case__`, `lowerCamelCase`)
    # vs. pre-obfuscation reads (`components`, `sd_pipe`, `image`, `inputs`,
    # ...) — those reads reference unbound names as written.  The
    # tuple-unpacking of `encode_prompt` below also carries an annotation on a
    # tuple target, which is not valid Python syntax.  Confirm against the
    # original diffusers test module; indentation is reconstructed.
    snake_case__ : str =StableDiffusionXLImgaImgPipeline
    snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'latents'}
    snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS

    def a__ ( self: List[str] )-> int:
        """Build the dict of tiny pipeline components (seeded for determinism)."""
        torch.manual_seed(0 )
        lowerCamelCase : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        lowerCamelCase : Any = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        lowerCamelCase : Any = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
        lowerCamelCase : Dict = CLIPTextModel(__a )
        lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
        lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
        """Build deterministic call kwargs (seeded image + generator)."""
        lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        # Map the tensor from [-1, 1] into the [0, 1] image range.
        lowerCamelCase : Any = image / 2 + 0.5
        if str(__a ).startswith("""mps""" ):
            lowerCamelCase : Dict = torch.manual_seed(__a )
        else:
            lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Tuple = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    def a__ ( self: Dict )-> Optional[Any]:
        """End-to-end run on CPU; compare an output slice to reference values."""
        lowerCamelCase : Any = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : int = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
        lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def a__ ( self: Optional[int] )-> Union[str, Any]:
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def a__ ( self: Optional[Any] )-> str:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def a__ ( self: List[str] )-> Optional[Any]:
        pass

    def a__ ( self: List[Any] )-> Union[str, Any]:
        """Plain-prompt and precomputed-prompt-embeds paths must agree."""
        lowerCamelCase : Tuple = self.get_dummy_components()
        lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : str = sd_pipe.to(__a )
        lowerCamelCase : Any = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        # forward without prompt embeds
        lowerCamelCase : Dict = self.get_dummy_inputs(__a )
        lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Optional[int] = negative_prompt
        lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
        lowerCamelCase : List[Any] = sd_pipe(**__a )
        lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
        lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
        lowerCamelCase : int = sd_pipe(
            **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
        lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    """Slow, GPU-only integration checks against real pretrained weights.

    Method names were mangled to ``a__``: ``tearDown`` was never invoked by
    unittest, the test never discovered, and ``self.get_inputs`` (called below)
    did not exist. Local names are restored from the mangled ``lowerCamelCase``
    bindings.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Deterministic latents + prompt kwargs for one 512x512 generation."""
        # NOTE(review): the mangled default was ``torch.floataa`` -- float32 assumed; confirm.
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_2_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        # torch_device is presumably imported at the top of the file -- confirm.
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 42
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger and checkpoint map. Both were mangled to the same name
# ``__lowerCamelCase`` (the second assignment shadowed the first), so neither
# was usable; restored to their conventional names.
logger = logging.get_logger(__name__)

# Canonical RWKV checkpoints -> hosted config files.
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class A__ ( PretrainedConfig):
    """Configuration class for RWKV models.

    Fixes: the base class was the undefined name ``__lowercase`` (now
    ``PretrainedConfig``, imported at the top of the file); ``model_type`` and
    ``attribute_map`` were both mangled to ``snake_case__`` (second shadowed the
    first); and every ``__init__`` parameter was named ``__a`` -- a SyntaxError.
    Parameter names/defaults are restored from the body's attribute assignments.
    """

    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50_277,
        context_length=1_024,
        hidden_size=4_096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Fall back to hidden_size-derived defaults when not given explicitly.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
| 721
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stand-in model whose forward lists the ONNX inputs contiguously.

    Restored: both mock classes were named ``A__`` (the second shadowed the
    first) while the tests below call ``FuncContiguousArgs()``, and the method
    declared three parameters all named ``__a`` -- a SyntaxError. The parameter
    names matter: ``ensure_valid_input`` inspects this signature.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Stand-in model with an extra parameter interleaved between ONNX inputs.

    Restored from the mangled ``A__``/``__a`` names (duplicate parameters were a
    SyntaxError); ``ensure_valid_input`` inspects this signature and must stop at
    the non-input parameter ``some_other_args``.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """Tests for transformers.convert_graph_to_onnx.

    Restored from mangled code: the class body itself references
    ``OnnxExportTestCase.MODEL_TO_TEST`` while the class was named ``A__`` and
    the attribute ``snake_case__``; every method was named ``a__`` (so unittest
    discovered nothing and methods shadowed each other); several signatures had
    duplicate ``__a`` parameters (SyntaxErrors).
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a temp dir and return the output path; fail on any error."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        valid_input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    """Build the kwargs dict for a TFOPT forward pass.

    Restored: all four parameters were named ``UpperCamelCase__`` (a SyntaxError)
    while the body referenced ``attention_mask``/``input_ids``/``config`` and the
    caller below invokes ``prepare_opt_inputs_dict``. ``head_mask`` is accepted
    for signature parity but unused here.
    """
    if attention_mask is None:
        # Everything that is not the pad token attends.
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    """Builds small OPT configs/inputs and checks decoder past-key-value reuse.

    Restored from mangled code: the class was named ``A__`` although ``setUp``
    below instantiates ``TFOPTModelTester(self)``; the three class attributes
    were all bound to ``snake_case__`` (shadowing each other); and ``__init__``
    declared every parameter as ``__a`` -- a SyntaxError. Names are recovered
    from the body's attribute assignments.
    """

    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False

    def prepare_config_and_inputs_for_common(self):
        """Return a small OPTConfig plus matching input dict (EOS appended to each row)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Decoding with cached past_key_values must match a full forward pass."""
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class A__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Common-test harness for TFOPT.

    Restored: the base classes were the undefined name ``__lowercase`` (now the
    two mixins imported at the top of the file); the boolean/int class attributes
    were all mangled to ``snake_case__``; methods named ``a__`` were never
    discovered by unittest; and all locals were rebound to ``lowerCamelCase``.
    """

    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10

    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)
                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)
                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids in an int32 TF constant.

    Restored: the function was mangled to ``snake_case`` while the integration
    test below calls ``_long_tensor``; ``tf.intaa`` (digits mangled) is int32 --
    TODO confirm against upstream dtype.
    """
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class A__ ( unittest.TestCase):
    """Helper test case building a tiny OPT config plus EOS-terminated inputs.

    Restored: the class attribute read as ``self.vocab_size`` was mangled to
    ``snake_case__``, and the helper method/locals had mangled names.
    """

    vocab_size = 99

    def _get_config_and_data(self):
        # Column of EOS (=2) tokens appended to every row.
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class A__ ( unittest.TestCase):
    """Integration check of the bare TFOPT model against reference activations.

    Restored local names (all were rebound to ``lowerCamelCase``) and gave the
    method a discoverable ``test_`` name; ``tf.floataa``-style mangled dtypes and
    undefined references (``_long_tensor``, ``expected_slice``) are fixed.
    """

    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))
        # Same forward pass, XLA-compiled, with a looser tolerance.
        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class A__ ( unittest.TestCase):
    """Checks OPT-350m mean logits against Metaseq reference values.

    Restored: ``setUp`` was mangled to ``a__`` (so ``self.path_model`` read in
    the test was never set), and all locals were rebound to ``lowerCamelCase``.
    ``GPTaTokenizer`` keeps the (mangled) name this file imports it under.
    """

    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPTaTokenizer.from_pretrained(self.path_model)
        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class A__ ( unittest.TestCase):
    """Greedy-generation integration tests for TFOPT.

    Restored from mangled code: the ``prompts`` property and all test methods
    were named ``a__`` (shadowing each other, never discovered), and every
    local was rebound to ``lowerCamelCase`` so later references were undefined.
    ``GPTaTokenizer`` keeps the name this file imports it under.
    """

    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        expected_outputs = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]
        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])
        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        # NOTE(review): mangled dtype was ``tf.intaa`` -- int64 assumed; confirm.
        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64) )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        expected_outputs = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPTaTokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 700
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
    """Unit tests for the greedy knapsack solver (knapsack.greedy_knapsack).

    Restored: each ``assertRaisesRegex`` call passed the undefined name ``__a``
    as its exception class (now ValueError / IndexError), and the methods were
    all named ``a__`` so unittest never discovered or ran them.
    """

    def test_sorted(self):
        """calc_profit returns the optimal profit when capacity covers everything."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # NOTE(review): assertRaisesRegex without a callable only returns a context
        # manager and asserts nothing by itself; kept for parity with the original --
        # consider wrapping the failing call in `with`.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Restored module-level names: all four constants were bound to the same
# mangled name ``__lowerCamelCase`` (each shadowing the previous), while the
# classes below read HEURISTIC, grid, delta and TPosition.
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Coordinates are (y, x) pairs.
TPosition = tuple[int, int]
class Node:
    """A* search node holding position, goal, path cost and heuristic score.

    Restored: the class was mangled to ``A__`` although AStar below constructs
    ``Node(...)``, and ``__init__`` declared six parameters all named ``__a``
    (a SyntaxError). Positions follow the module's (y, x) convention.
    """

    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, g_cost: int, parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, otherwise Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by total cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """Single-direction A* over the module-level ``grid``.

    Restored: the class was mangled to ``A__`` although BidirectionalAStar and
    the ``__main__`` section construct ``AStar(...)``; all locals/parameters
    were mangled (``__a`` / ``lowerCamelCase``) so references like
    ``current_node`` and ``successors`` were undefined.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Node takes (pos_x, pos_y, goal_x, goal_y, ...), hence the index swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand the cheapest open node until the target is reached."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        # No path found: fall back to the start position only.
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Forward and backward AStar run in lockstep until their frontiers meet.

    Restored: the class was mangled to ``A__`` although ``__main__`` constructs
    ``BidirectionalAStar(...)``, and the attribute/local names (``fwd_astar``,
    ``current_fwd_node``, ``successors`` ...) were all mangled away so the body
    referenced undefined names.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each direction chases the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the two half-paths, dropping the duplicated meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Restored the mangled ``__lowerCamelCase`` bindings; note the original also
    # timed the bidirectional run without ever calling search() -- fixed below.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    bd_path = bidir_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A__ ( __lowercase):
    """Output container for a VAE decoding step.

    NOTE(review): names are obfuscated — upstream this is ``DecoderOutput``
    with a single field ``sample`` and base class ``BaseOutput``; the base
    name ``__lowercase`` is not defined in this chunk — confirm.
    """
    # Decoded batch; presumably shape (batch, channels, height, width) — TODO confirm
    snake_case__ : torch.FloatTensor
class A__ ( nn.Module):
    """Convolutional VAE encoder: conv_in -> down blocks -> mid block -> norm/act/conv_out.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    as ``__a`` (a SyntaxError) and used the nonexistent ``nn.Convad``; parameter
    names were restored from their uses in the body and ``Conv2d`` substituted.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down: one block per entry in down_block_types, widths from block_out_channels
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,  # last block keeps spatial size
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out: a double-width head emits concatenated mean/logvar when double_z
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        """Encode ``x``; uses activation checkpointing when enabled and training."""
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class A__ ( nn.Module):
    """Convolutional VAE decoder: conv_in -> mid block -> up blocks -> norm/act/conv_out.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    as ``__a`` (a SyntaxError) and used the nonexistent ``nn.Convad``; parameter
    names were restored from their uses in the body and ``Conv2d`` substituted.
    """

    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        # spatial norm conditions on the latent, so it needs temb channels
        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlockaD(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up: widths walk block_out_channels in reverse
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,  # last block keeps spatial size
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        """Decode latent ``z``; ``latent_embeds`` feeds SpatialNorm when norm_type == 'spatial'."""
        sample = z
        sample = self.conv_in(sample)

        # up blocks may run in a different dtype (e.g. fp32 upcast) — match it
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)
                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)
            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)
        return sample
class A__ ( nn.Module):
    """VQ-VAE vector quantizer: snaps each latent vector to its nearest
    codebook entry, with a straight-through gradient estimator and optional
    index remapping.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter
    as ``__a`` (a SyntaxError) and named every method ``a__`` (so later defs
    shadowed earlier ones and ``nn.Module.__call__`` could not find ``forward``);
    names were restored from the surviving call sites (``self.remap_to_used``,
    ``self.unmap_to_all``) and attribute uses.
    """

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e                    # number of codebook entries
        self.vq_embed_dim = vq_embed_dim  # dimensionality of each entry
        self.beta = beta                  # commitment-loss weight
        self.legacy = legacy              # selects which loss term beta scales

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            # "used" maps a reduced index set onto the full codebook
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f'Remapping {self.n_e} indices to {self.re_embed} indices. '
                f'Using {self.unknown_index} for unknown indices.'
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        """Map full-codebook indices to positions in the reduced `used` set."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1  # indices not present in `used`
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        """Inverse of :meth:`remap_to_used`: reduced indices back to full codebook."""
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        """Quantize NCHW latents; returns (z_q, loss, (perplexity, min_encodings, indices))."""
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # nearest codebook entry per latent vector
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)
        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding; legacy flag moves beta between the terms
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        """Look up codebook vectors for `indices`; `shape` is (batch, height, width, channel)."""
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class A__:
    """Diagonal Gaussian posterior parameterized by a tensor holding mean and
    log-variance concatenated along dim 1 (upstream: DiagonalGaussianDistribution).

    NOTE(review): the obfuscated original named all five methods ``a__`` (so
    only the last survived on the class), duplicated parameter names, used a
    mutable default ``[1, 2, 3]`` and an undefined base class ``__lowercase``;
    all fixed here with names restored from the bodies.
    """

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        # split (B, 2C, ...) into mean and log-variance halves along dim 1
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        # clamp keeps exp() below numerically safe
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            # degenerate distribution: zero variance, sampling returns the mean
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator=None):
        """Reparameterized sample: mean + std * eps, on the parameters' device/dtype."""
        # randn_tensor is imported at module top; honours an optional generator
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        """KL divergence to `other` (standard normal when None), summed over dims [1, 2, 3]."""
        if self.deterministic:
            return torch.Tensor([0.0])
        if other is None:
            return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
        return 0.5 * torch.sum(
            torch.pow(self.mean - other.mean, 2) / other.var
            + self.var / other.var
            - 1.0
            - self.logvar
            + other.logvar,
            dim=[1, 2, 3],
        )

    def nll(self, sample, dims=None):
        """Negative log-likelihood of `sample`, summed over `dims` (default [1, 2, 3])."""
        if dims is None:  # avoid the original's mutable default argument
            dims = [1, 2, 3]
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        """Mode of a Gaussian is its mean."""
        return self.mean
| 702
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds tiny FocalNet configs and random inputs for the unit tests below.

    NOTE(review): the obfuscated original declared all 27 ``__init__``
    parameters as ``__a`` (a SyntaxError) and bound every attribute to a
    throwaway local; names restored from the assignment targets and the
    ``self.model_tester.create_and_check_*`` call sites.  The class is named
    ``FocalNetModelTester`` because the test classes below instantiate it by
    that name.  Mutable list defaults mirror the upstream tester and are
    never mutated.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Random pixel inputs (plus labels when enabled) and a matching config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): kwarg spelled `path_norm` in the original — confirm against FocalNetConfig
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # each stage halves height/width, hence the 4** factor on seq length
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    """Unit tests for the FocalNet model family.

    NOTE(review): the obfuscated original used the undefined name
    ``__lowercase`` for both mixin bases (restored from the imports at the top
    of this file), mangled the mixin-consumed class attributes, and named
    every method ``a__`` — so unittest could discover nothing; ``test_*`` and
    helper names restored from the bodies and from ``ModelTesterMixin``'s
    conventions.
    """

    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        # skip the backbone (last class): it has no embedding accessors
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )
        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)
        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        # pad image dims up to the next multiple of the patch size
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
    """Slow integration test: checks real FocalNet-tiny logits on a COCO image.

    NOTE(review): method names were mangled to ``a__`` in the obfuscated
    original (so neither the cached property nor the test was discoverable);
    restored here.
    """

    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # Fixed: the original used assertTrue(value, 281), whose second
        # argument is a message and is never compared — assertEqual actually
        # checks the predicted class id.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class A__ ( BackboneTesterMixin , unittest.TestCase):
    """Backbone-API compliance tests for FocalNetBackbone.

    NOTE(review): the obfuscated original used the undefined base
    ``__lowercase`` (restored to ``BackboneTesterMixin``, imported at the top
    of this file) and mangled the attribute names the mixin reads.
    """

    # attributes consumed by BackboneTesterMixin
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 42
| 0
|
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily merge consecutive (src, tgt) pairs into longer examples.

    Pairs are concatenated with a space until either side would exceed
    ``max_tokens`` according to tokenizer ``tok``; the accumulated example is
    then flushed and a new one started.  Returns the packed source and target
    lists.

    NOTE(review): the obfuscated original bound ``finished_src``/``finished_tgt``,
    ``new_src``/``new_tgt`` and ``cand_src``/``cand_tgt`` to a throwaway local,
    leaving them undefined where used; names restored (the function name comes
    from its call site in ``pack_data_dir``).
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # token count of the candidate string, measured by the real tokenizer
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the final accumulated pair
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    """Pack the ``train`` split of ``data_dir`` with :func:`pack_examples` and
    write the result under ``save_path``; ``val``/``test`` splits are copied
    through unchanged so evaluation stays on the original examples.
    """
    save_path = Path(save_path)
    # NOTE(review): the obfuscated source had a placeholder here; exist_ok=True is
    # the only sensible value for an idempotent output dir — confirm upstream.
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")
def packer_cli():
    """CLI entry point: parse arguments, load the tokenizer named by
    ``--tok_name`` and pack ``--data_dir`` into ``--save_path``."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 703
|
"""simple docstring"""
import os
def solution():
    """Project Euler 11: greatest product of four adjacent numbers (right, down,
    or either diagonal) in the 20x20 grid stored in ``grid.txt`` next to this file.

    Returns:
        int: the maximum product found.
    """
    # os.path.join handles an empty dirname (file run from its own directory)
    with open(os.path.join(os.path.dirname(__file__), "grid.txt")) as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0
    # horizontal runs of four
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp
    # vertical runs of four
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal down-right
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal down-left (needs j >= 3)
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    print(solution())
| 42
| 0
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    """Tiny Linear -> BatchNorm1d -> Linear network used as the hook-test fixture.

    NOTE(review): the obfuscated original used nonexistent ``nn.BatchNormad`` and
    bound both Linear layers to the same attribute; activations here are 2-D
    (batch, features), so BatchNorm1d is the matching norm layer.
    """

    def __init__(self):
        super().__init__()
        self.lineara = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linearb = nn.Linear(4, 5)

    def forward(self, x):
        # (batch, 3) -> (batch, 4) -> normalized -> (batch, 5)
        return self.linearb(self.batchnorm(self.lineara(x)))
class PreForwardHook(ModelHook):
    """Test hook that bumps the first positional forward argument by one.

    NOTE(review): ``pre_forward`` is the accelerate ``ModelHook`` entry point that
    receives and must return the wrapped forward's ``(args, kwargs)`` — name
    reconstructed from that contract; base class taken from this file's imports.
    """

    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    """Test hook that adds one to the module output after the forward call.

    NOTE(review): the obfuscated method had two parameters both named ``__a``
    (a SyntaxError); ``post_forward(module, output)`` is the accelerate
    ``ModelHook`` contract.
    """

    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    """Unit tests for accelerate's module hooks: add/remove, pre/post forward,
    no-grad mode, device alignment and CPU offload.

    NOTE(review): class, method and local names were reconstructed from the
    mangled originals following accelerate test-suite conventions; assumes the
    ``ModelForTest`` fixture exposes ``lineara``/``batchnorm``/``linearb``
    submodules — verify against upstream.
    """

    def test_add_and_remove_hooks(self):
        """A hook can be attached and detached without altering forward's identity."""
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        """Appending a second hook wraps both in a SequentialHook."""
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        """A pre-forward hook shifts the input; re-adding replaces, SequentialHook chains."""
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        """A post-forward hook shifts the output; re-adding replaces, SequentialHook chains."""
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        """Setting ``no_grad`` on a hook makes the hooked forward run without autograd."""
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices(self):
        """Submodules can live on different GPUs; io_same_device restores the input device."""
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linearb, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.lineara.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linearb.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        """Per-submodule AlignDevicesHook with offload moves params to meta and back."""
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.lineara, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linearb, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linearb.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linearb)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.lineara, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linearb, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linearb.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linearb)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        """attach_align_device_hook offloads a whole model recursively."""
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linearb.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linearb.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        """Same as above but with an explicit state_dict used as the weights map."""
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict())

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linearb.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict(), offload_buffers=True, )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linearb.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linearb.weight.device, torch.device("cpu"))
| 704
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False  # NOTE(review): module-level flag — name and purpose were mangled by obfuscation; original intent not recoverable from this file
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the Attend-and-Excite Stable Diffusion pipeline.

    NOTE(review): the obfuscated class listed the same placeholder base three
    times (a TypeError at class creation); the mixin bases, attribute and method
    names were reconstructed from this file's imports and the pipeline test-suite
    conventions — verify against upstream.
    """

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False  # NOTE(review): flag name reconstructed — verify
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Attend-and-Excite is reproducibility-sensitive: force deterministic kernels
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        """Build the tiny UNet/scheduler/VAE/CLIP stack used by the fast tests."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            # clip_sample/set_alpha_to_one were placeholders; False matches the
            # standard SD v1 DDIM configuration — confirm upstream.
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for a single fast inference step."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        """One-step CPU inference matches the recorded image slice."""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test for Attend-and-Excite against a recorded image.

    NOTE(review): names and the fp16 dtype were reconstructed from the mangled
    originals (``torch.floataa`` is the obfuscator's spelling of a float dtype;
    float16 matches the GPU half-precision pattern) — verify against upstream.
    """

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        # release GPU memory between tests
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 42
| 0
|
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    """CLI wrapper around TensorFlowBenchmark.

    Parses TensorFlowBenchmarkArguments from the command line; if parsing fails
    because of the deprecated ``--no_*`` flag spelling, builds a helpful error
    pointing to the new ``--no-*`` spelling before giving up.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # HACK/NOTE(review): the offending argument list is recovered by eval-ing
        # the tail of the parser's own error message; eval on message text is
        # fragile — ast.literal_eval would be the safer choice.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark.run()


if __name__ == "__main__":
    main()
| 705
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Fixture that builds a tiny EsmConfig plus random inputs and runs
    shape-level checks for EsmModel and its task heads.

    NOTE(review): parameter, attribute and method names were reconstructed from
    the mangled originals (the obfuscated ``__init__`` repeated the parameter
    name ``__a``, a SyntaxError); method names match the call sites in the test
    class (``prepare_config_and_inputs``, ``create_and_check_model``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Build the tiny EsmConfig under test."""
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Check base-model output shapes with and without an attention mask."""
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the MLM head's logit shape."""
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head's logit shape."""
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter for ModelTesterMixin: returns (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : Any =False
snake_case__ : Dict =(
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =()
snake_case__ : Optional[int] =(
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ : Any =True
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = EsmModelTester(self )
lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: List[Any] )-> Optional[Any]:
self.config_tester.run_common_tests()
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Tuple )-> Any:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase : Tuple = type
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Tuple:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Any )-> List[Any]:
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = EsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def a__ ( self: str )-> List[str]:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
lowerCamelCase : Union[str, Any] = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(__a , __a ) ) )
def a__ ( self: Optional[int] )-> int:
    """Check create_position_ids_from_inputs_embeds: sequential positions offset by
    padding_idx + 1, identical for every batch row."""
    config = self.model_tester.prepare_config_and_inputs()[0]
    embeddings = EsmEmbeddings(config=config )
    inputs_embeds = torch.empty(2 , 4 , 30 )
    expected_single_positions = [
        0 + embeddings.padding_idx + 1,
        1 + embeddings.padding_idx + 1,
        2 + embeddings.padding_idx + 1,
        3 + embeddings.padding_idx + 1,
    ]
    expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
    position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
    self.assertEqual(position_ids.shape , expected_positions.shape )
    self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Any )-> Optional[Any]:
    # Intentionally skipped; the decorator carries the reason.
    pass
@unittest.skip("""Esm does not support embedding resizing""" )
def a__ ( self: Dict )-> Dict:
    # Intentionally skipped; the decorator carries the reason.
    pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: List[str] )-> Dict:
    # Intentionally skipped; the decorator carries the reason.
    pass
@require_torch
class A__ ( __lowercase):
    """Slow integration tests running a small pretrained ESM checkpoint end to end
    and comparing logits/hidden states against recorded reference slices."""

    @slow
    def a__ ( self: Any )-> Union[str, Any]:
        """EsmForMaskedLM logits on a tiny token sequence match reference values."""
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def a__ ( self: Dict )-> str:
        """EsmModel hidden states on a short sequence match reference values."""
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 42
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
class A__ ( __lowercase):
    """Configuration for wrapping a timm model as a backbone.

    NOTE(review): parameter names reconstructed from the attribute read sites; the
    obfuscated source declared every parameter as `__a` (a SyntaxError) and bound
    the values to throwaway locals instead of instance attributes — confirm against
    the upstream TimmBackboneConfig.
    """

    snake_case__ : Union[str, Any] ='''timm_backbone'''

    def __init__(
        self,
        backbone=None,          # timm model identifier to instantiate
        num_channels: int = 3,  # number of input image channels
        features_only: bool = True,          # expose intermediate feature maps only
        use_pretrained_backbone: bool = True,  # load timm pretrained weights
        out_indices=None,       # which feature stages to return; default: last stage
        **kwargs,
    )-> List[Any]:
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This config type always routes through the timm integration.
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 706
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AlbertTokenizer
snake_case__ : Optional[Any] =AlbertTokenizerFast
snake_case__ : Optional[int] =True
snake_case__ : Any =True
snake_case__ : Optional[int] =True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = AlbertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : int = """<pad>"""
lowerCamelCase : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
self.assertEqual(len(__a ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
lowerCamelCase : str = self.get_tokenizer()
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
lowerCamelCase : List[str] = tokenizer.tokenize(__a )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
lowerCamelCase : Any = self.get_rust_tokenizer()
lowerCamelCase : List[str] = tokenizer.encode(__a )
lowerCamelCase : str = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
def a__ ( self: Tuple )-> List[Any]:
lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
lowerCamelCase : str = AlbertTokenizer(__a )
lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42
| 0
|
from __future__ import annotations
def snake_case ( maze: list[list[int]] ) -> bool:
    """Solve the rat-in-a-maze problem and print the solution grid, or a failure message.

    maze: square grid; cells equal to 0 are open, truthy cells are walls
    (inferred from the solver's block check — confirm with callers).
    Returns True when a path from (0, 0) to (size-1, size-1) exists.
    """
    # Bug fix: locals were bound to throwaway names while later lines read
    # `size`/`solutions`/`solved`, which were undefined.
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print("""\n""".join(str(row ) for row in solutions ) )
    else:
        print("""No solution exists!""" )
    return solved
def run_maze ( maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    """Depth-first search from cell (i, j) toward (size-1, size-1), marking the
    path with 1s in `solutions` and backtracking on dead ends.

    Bug fix: this function was defined under an obfuscated name while both the
    wrapper above and its own recursion call `run_maze`, and every write to
    `solutions[i][j]` had been replaced by a throwaway local binding.
    """
    size = len(maze )
    # Final check point: reached the bottom-right cell.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # mark this cell as part of the tentative path
            solutions[i][j] = 1
            # explore down, right, up, left — in that order
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True
            solutions[i][j] = 0  # backtrack: no direction worked
            return False
    return False


# Preserve the module's previous (obfuscated) binding for backward compatibility.
snake_case = run_maze
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort ( graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    """Return a post-order (reverse topological) DFS ordering of the vertices
    reachable from `vert`, marking them in `visited` (mutated in place).

    Bug fix: the def was renamed to an obfuscated name while the recursion (and
    the SCC driver below) call `topology_sort`; `visited`/`order` writes had been
    replaced by throwaway local bindings.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order


# Preserve the module's previous (obfuscated) binding for backward compatibility.
snake_case = topology_sort
def find_components ( reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    """Return the vertices reachable from `vert` in the reversed graph (one
    strongly connected component when driven by the Kosaraju pass below),
    marking them in `visited` (mutated in place).

    Bug fix: the def was renamed to an obfuscated name while the recursion (and
    the SCC driver) call `find_components`; `visited`/`component` writes had been
    replaced by throwaway local bindings.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component


# Preserve the module's previous (obfuscated) binding for backward compatibility.
snake_case = find_components
def snake_case ( graph: dict[int, list[int]] ) -> list[list[int]]:
    """Kosaraju's algorithm: return the strongly connected components of `graph`
    (vertices assumed to be 0..len(graph)-1).

    Bug fix: every local had been replaced by a throwaway binding while later
    lines read `visited`/`reversed_graph`/`order`/`vert`/`component`, which were
    undefined. Relies on the sibling `topology_sort` and `find_components`.
    """
    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    # Build the transpose graph.
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    # First pass: DFS post-order over the original graph.
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    # Second pass: peel components off the transpose in reverse finish order.
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
def is_9_pandigital ( candidate: int ) -> bool:
    """Return True when `candidate` uses each digit 1-9 exactly once.

    Bug fix: the def was renamed to an obfuscated name while the solver calls
    `is_9_pandigital`, and the string conversion was bound to a throwaway local
    while later reads used the raw int (len(int) raises TypeError).
    """
    digits = str(candidate )
    return len(digits ) == 9 and set(digits ) == set("""123456789""" )


# Preserve the module's previous (obfuscated) binding for backward compatibility.
lowerCAmelCase = is_9_pandigital
def solution ( ) -> int | None:
    """Project Euler 38: largest 9-pandigital concatenated product.

    Searches candidates of the form base*100002 (5-digit base, n=2) then
    base*1002003 (3-digit base, n=3), largest first, returning the first
    9-pandigital hit or None.

    Bug fix: the module-level ``print(f"{solution() = }")`` calls `solution`,
    but the def carried an obfuscated name; the candidate product was also bound
    to a throwaway local while the pandigital test read an undefined name.
    """
    for base_num in range(9999 , 4999 , -1 ):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    for base_num in range(333 , 99 , -1 ):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate ):
            return candidate
    return None


# Preserve the module's previous (obfuscated) binding for backward compatibility.
lowerCAmelCase = solution
if __name__ == "__main__":
print(f"""{solution() = }""")
| 708
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :str = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
    """Configuration for the Time Series Transformer model.

    NOTE(review): parameter names and their positional order were reconstructed
    from the attribute read sites and the upstream transformers
    `TimeSeriesTransformerConfig` signature; the obfuscated source declared every
    parameter as `__a` (a SyntaxError) and bound values to throwaway locals
    instead of instance attributes — confirm against upstream.
    """

    snake_case__ : List[Any] ='''time_series_transformer'''
    snake_case__ : List[Any] ={
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    )-> Any:
        # time series specific configuration
        self.prediction_length = prediction_length
        # context defaults to the prediction horizon when not given
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            self.embedding_dimension = embedding_dimension
        else:
            # heuristic default: roughly half the cardinality, capped at 50
            self.embedding_dimension = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def _number_of_features( self )-> int:
        # Bug fix: this property was renamed by obfuscation while __init__ reads
        # `self._number_of_features`; restored to the name the class actually uses.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 42
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =AudioLDMPipeline
snake_case__ : int =TEXT_TO_AUDIO_PARAMS
snake_case__ : Dict =TEXT_TO_AUDIO_BATCH_PARAMS
snake_case__ : Optional[Any] =frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
def a__ ( self: List[Any] )-> Optional[int]:
torch.manual_seed(0 )
lowerCamelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , )
lowerCamelCase : List[str] = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase : Any = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
lowerCamelCase : Union[str, Any] = ClapTextModelWithProjection(__a )
lowerCamelCase : List[Any] = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
lowerCamelCase : List[Any] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , )
lowerCamelCase : Optional[Any] = SpeechTaHifiGan(__a )
lowerCamelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def a__ ( self: Any , __a: Optional[Any] , __a: List[str]=0 )-> Union[str, Any]:
if str(__a ).startswith("""mps""" ):
lowerCamelCase : List[str] = torch.manual_seed(__a )
else:
lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Dict = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def a__ ( self: List[Any] )-> Optional[Any]:
lowerCamelCase : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Optional[int] = self.get_dummy_components()
lowerCamelCase : List[str] = AudioLDMPipeline(**__a )
lowerCamelCase : Tuple = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = audioldm_pipe(**__a )
lowerCamelCase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 256
lowerCamelCase : str = audio[:10]
lowerCamelCase : Any = np.array(
[-0.00_50, 0.00_50, -0.00_60, 0.00_33, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_33] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self: Tuple )-> Dict:
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : int = AudioLDMPipeline(**__a )
lowerCamelCase : Tuple = audioldm_pipe.to(__a )
lowerCamelCase : int = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : int = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
lowerCamelCase : Union[str, Any] = audioldm_pipe(**__a )
lowerCamelCase : List[Any] = output.audios[0]
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__a )
lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase : Dict = audioldm_pipe.tokenizer(
__a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="""pt""" , )
lowerCamelCase : List[Any] = text_inputs["""input_ids"""].to(__a )
lowerCamelCase : List[str] = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase : Any = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase : List[str] = F.normalize(__a , dim=-1 )
lowerCamelCase : str = prompt_embeds
# forward
lowerCamelCase : Dict = audioldm_pipe(**__a )
lowerCamelCase : List[Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : List[Any] = AudioLDMPipeline(**__a )
lowerCamelCase : List[Any] = audioldm_pipe.to(__a )
lowerCamelCase : Tuple = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[int] = self.get_dummy_inputs(__a )
lowerCamelCase : Optional[Any] = 3 * ["""this is a negative prompt"""]
lowerCamelCase : int = negative_prompt
lowerCamelCase : Optional[int] = 3 * [inputs["""prompt"""]]
# forward
lowerCamelCase : Any = audioldm_pipe(**__a )
lowerCamelCase : Optional[Any] = output.audios[0]
lowerCamelCase : Any = self.get_dummy_inputs(__a )
lowerCamelCase : List[Any] = 3 * [inputs.pop("""prompt""" )]
lowerCamelCase : int = []
for p in [prompt, negative_prompt]:
lowerCamelCase : Any = audioldm_pipe.tokenizer(
__a , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="""pt""" , )
lowerCamelCase : Dict = text_inputs["""input_ids"""].to(__a )
lowerCamelCase : List[Any] = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase : Union[str, Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase : str = F.normalize(__a , dim=-1 )
embeds.append(__a )
lowerCamelCase : Union[str, Any] = embeds
# forward
lowerCamelCase : List[str] = audioldm_pipe(**__a )
lowerCamelCase : List[str] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def a__ ( self: Optional[Any] )-> List[str]:
lowerCamelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Dict = self.get_dummy_components()
lowerCamelCase : List[Any] = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase : Optional[Any] = AudioLDMPipeline(**__a )
lowerCamelCase : int = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : List[Any] = self.get_dummy_inputs(__a )
lowerCamelCase : Any = """egg cracking"""
lowerCamelCase : str = audioldm_pipe(**__a , negative_prompt=__a )
lowerCamelCase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 256
lowerCamelCase : int = audio[:10]
lowerCamelCase : List[Any] = np.array(
[-0.00_51, 0.00_50, -0.00_60, 0.00_34, -0.00_26, 0.00_33, -0.00_27, 0.00_33, -0.00_28, 0.00_32] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def a__ ( self: List[str] )-> Union[str, Any]:
lowerCamelCase : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : Dict = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase : Any = AudioLDMPipeline(**__a )
lowerCamelCase : List[Any] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Optional[Any] = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
lowerCamelCase : Dict = audioldm_pipe(__a , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCamelCase : List[Any] = 2
lowerCamelCase : Tuple = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowerCamelCase : Optional[int] = 2
lowerCamelCase : Dict = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowerCamelCase : str = 2
lowerCamelCase : Union[str, Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def a__ ( self: Optional[Any] )-> int:
lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : str = self.get_dummy_components()
lowerCamelCase : str = AudioLDMPipeline(**__a )
lowerCamelCase : Any = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : List[Any] = audioldm_pipe.vocoder.config.sampling_rate
lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(__a )
lowerCamelCase : str = audioldm_pipe(audio_length_in_s=0.0_16 , **__a )
lowerCamelCase : int = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.0_16
lowerCamelCase : int = audioldm_pipe(audio_length_in_s=0.0_32 , **__a )
lowerCamelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.0_32
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase : Tuple = self.get_dummy_components()
lowerCamelCase : int = AudioLDMPipeline(**__a )
lowerCamelCase : List[str] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : int = ["""hey"""]
lowerCamelCase : str = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase : Optional[Any] = output.audios.shape
assert audio_shape == (1, 256)
lowerCamelCase : Optional[int] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCamelCase : Optional[Any] = SpeechTaHifiGan(__a ).to(__a )
lowerCamelCase : Dict = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def a__ ( self: Optional[Any] )-> int:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a )
def a__ ( self: List[Any] )-> str:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def a__ ( self: Dict )-> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a )
@slow
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[Any] )-> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ ( self: List[Any] , __a: str , __a: List[str]="cpu" , __a: Optional[int]=torch.floataa , __a: Optional[Any]=0 )-> List[Any]:
lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase : Optional[Any] = np.random.RandomState(__a ).standard_normal((1, 8, 128, 16) )
lowerCamelCase : str = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase : Optional[int] = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def a__ ( self: Tuple )-> Optional[int]:
lowerCamelCase : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCamelCase : Optional[Any] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : int = self.get_inputs(__a )
lowerCamelCase : Optional[Any] = 25
lowerCamelCase : Tuple = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 81_920
lowerCamelCase : Any = audio[77_230:77_240]
lowerCamelCase : Any = np.array(
[-0.48_84, -0.46_07, 0.00_23, 0.50_07, 0.58_96, 0.51_51, 0.38_13, -0.02_08, -0.36_87, -0.43_15] )
lowerCamelCase : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def a__ ( self: Any )-> List[Any]:
lowerCamelCase : Dict = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
lowerCamelCase : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCamelCase : Optional[int] = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase : Tuple = self.get_inputs(__a )
lowerCamelCase : int = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 81_920
lowerCamelCase : List[str] = audio[27_780:27_790]
lowerCamelCase : Tuple = np.array([-0.21_31, -0.08_73, -0.01_24, -0.01_89, 0.05_69, 0.13_73, 0.18_83, 0.28_86, 0.32_97, 0.22_12] )
lowerCamelCase : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 709
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase: int = 10  # legacy binding kept for backward compatibility
RADIX = 10  # base used by the radix sort below (was mangled away from its name)


def snake_case(list_of_ints: list[int]) -> list[int]:
    """Sort non-negative integers ascending using LSD radix sort, in place.

    Fixes vs. the mangled original: ``RADIX``, the parameter name
    ``list_of_ints``, the bucket index ``tmp`` and the write-back cursor ``a``
    were all renamed out of existence, leaving NameErrors.

    :param list_of_ints: list of non-negative ints; sorted in place.
    :return: the same list object, sorted.
    """
    # Empty input: nothing to sort, and max() below would raise.
    if not list_of_ints:
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints, in order
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 42
| 0
|
"""simple docstring"""
def snake_case(UpperCamelCase__: int = 1000) -> int:
    """Return the sum of all natural numbers below the limit divisible by 3 or 5.

    (Project Euler #1.) The range starts at 3 because 1 and 2 are divisible by
    neither.

    :param UpperCamelCase__: exclusive upper limit (default 1000).
    :return: sum of multiples of 3 or 5 strictly below the limit.
    """
    return sum(e for e in range(3, UpperCamelCase__) if e % 3 == 0 or e % 5 == 0)


if __name__ == "__main__":
    # Fixed: the original printed `solution()`, a name that does not exist here.
    print(f"{snake_case() = }")
| 710
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case(torch_layer, weight, bias=None):
    """Copy a trax weight (and optional bias) tensor into a torch layer.

    Fixes vs. the mangled original: the three parameters all shared one name
    (a SyntaxError) and the new ``nn.Parameter``s were bound to throwaway
    locals instead of ``torch_layer.weight`` / ``torch_layer.bias``.

    :param torch_layer: target torch module exposing ``.weight`` (and ``.bias``).
    :param weight: tensor whose shape must match ``torch_layer.weight``.
    :param bias: optional tensor whose shape must match ``torch_layer.bias``.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias)
# NOTE(review): mangled converter -- all three parameters share the name
# `UpperCamelCase__` (a SyntaxError) and the np.asarray results are bound to
# throwaway `lowerCamelCase` locals while the set_param calls below need the
# individual weight arrays. Originals were presumably (weights, torch_layer,
# hidden_size) -- confirm against the transformers Reformer converter.
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
    """Load trax LSH self-attention weights (query_key, value, output.dense) into a torch layer."""
    # set torch weights for 1-to-1 comparison
    lowerCamelCase : Dict = np.asarray(weights[0] )
    lowerCamelCase : List[Any] = np.asarray(weights[1] )
    lowerCamelCase : List[str] = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
# NOTE(review): same mangling as the LSH variant above -- duplicate parameter
# names (SyntaxError) and weight arrays bound to unused locals. Restore
# (weights, torch_layer, hidden_size) and the per-array names before use.
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
    """Load trax local self-attention weights (query, key, value, output.dense) into a torch layer."""
    # set torch weights for 1-to-1 comparison
    lowerCamelCase : Tuple = np.asarray(weights[0] )
    lowerCamelCase : Any = np.asarray(weights[1] )
    lowerCamelCase : List[Any] = np.asarray(weights[2] )
    lowerCamelCase : List[str] = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
# NOTE(review): mangled -- `layer_norm_a`, `attn_weights`, `intermediate_weights`
# and the torch_block parameter are read below but every assignment lands in a
# throwaway `lowerCamelCase` local; parameters were presumably
# (weights, torch_block, hidden_size). Restore before use.
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
    """Load one trax Reformer block (attention layernorm, LSH/local attention, chunked feed-forward) into a torch block."""
    # layernorm 1
    lowerCamelCase : str = weights[0][0][0]
    lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
    lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
    # lsh weights + output
    lowerCamelCase : List[Any] = weights[0][1]
    # fewer than 4 arrays means LSH attention (query_key/value/out), else local (q/k/v/out)
    if len(UpperCamelCase__ ) < 4:
        set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
    else:
        set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
    # intermediate weighs
    lowerCamelCase : int = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(UpperCamelCase__ ) == 4:
        lowerCamelCase : Dict = intermediate_weights[2]
    # layernorm 2
    lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
    lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
    # intermediate dense
    lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
    lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
    # intermediate out
    lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
    lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# NOTE(review): mangled -- `torch_model_reformer`, `emb_weights`,
# `trax_layer_weights`, `layer_norm_out` etc. are read but assigned only to
# throwaway locals; parameters were presumably (weights, torch_model,
# hidden_size). Restore before use.
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]:
    """Load a full trax Reformer checkpoint (embeddings, all blocks, final layernorm, LM head) into a torch model."""
    # reformer model
    lowerCamelCase : List[Any] = torch_model.reformer
    # word embeds
    lowerCamelCase : Union[str, Any] = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , )
    # axial position embeddings come as a list of factorized weight arrays
    if isinstance(weights[3] , UpperCamelCase__ ):
        lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'{position_embeddings[emb_idx]} emb does not match'
            lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) )
    lowerCamelCase : int = weights[5]
    # each torch layer consumes 4 consecutive trax weight groups
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        UpperCamelCase__ ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # output layer norm
    lowerCamelCase : Any = np.asarray(weights[7][0] )
    lowerCamelCase : List[str] = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
    # output embeddings
    lowerCamelCase : List[Any] = np.asarray(weights[9][0] )
    lowerCamelCase : Optional[int] = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
# NOTE(review): mangled -- `config`, `model`, `model_weights` and
# `pytorch_dump_path` are read but only throwaway locals are assigned;
# parameters were presumably (trax_model_pkl_path, config_file,
# pytorch_dump_path). Restore before use.
def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]:
    """Build a ReformerModelWithLMHead from a config file, load trax pickle weights into it, and save the state dict."""
    # Initialise PyTorch model
    lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ )
    print(F'Building PyTorch model from configuration: {config}' )
    lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ )
    # NOTE(review): pickle.load on an untrusted checkpoint executes arbitrary
    # code -- only run this on checkpoints you trust.
    with open(UpperCamelCase__ , """rb""" ) as f:
        lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""]
    set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , UpperCamelCase__ )
# CLI entry point for the trax -> PyTorch Reformer conversion.
# NOTE(review): mangled -- the ArgumentParser is bound to `__lowerCamelCase`
# while the add_argument calls read `parser`, and `convert_trax_checkpoint_to_pytorch`
# is not defined under that name in this module. Restore the original bindings
# before running.
if __name__ == "__main__":
    __lowerCamelCase :Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    __lowerCamelCase :Optional[int] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
| 0
|
"""simple docstring"""
import sys
def snake_case(array):
    """Matrix-chain multiplication order via dynamic programming.

    Matrix i has dimensions array[i - 1] x array[i] (1-indexed). Fixes vs. the
    mangled original: the parameter name ``array``, the end index ``b``, the
    candidate ``cost`` and the ``matrix``/``sol`` cell writes were all renamed
    into throwaway locals, leaving NameErrors and a DP table that never filled.

    :param array: dimension list of length n; defines n - 1 matrices.
    :return: (matrix, sol) -- minimal scalar-multiplication counts and the
             split index table used to reconstruct the optimal parenthesization.
    """
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    # chain_length is the number of matrices in the sub-chain being solved
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            # try every split point c between a and b
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def snake_case(optimal_solution, i, j):
    """Recursively print the optimal parenthesization for matrices A_i..A_j.

    Fixes vs. the mangled original: the three parameters shared one name (a
    SyntaxError) and the recursive calls targeted the undefined name
    ``print_optiomal_solution``.

    :param optimal_solution: split-index table from the DP above.
    :param i: first matrix index (1-based).
    :param j: last matrix index (1-based).
    """
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        snake_case(optimal_solution, i, optimal_solution[i][j])
        snake_case(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


# Backwards-compatible alias: other code in this module calls the function
# under its original (pre-mangling) name.
print_optiomal_solution = snake_case
def snake_case():
    """Demo driver: compute and print the optimal matrix-chain order for a fixed example.

    Fixes vs. the mangled original: the result of matrix_chain_order was bound
    to a single throwaway local instead of being unpacked into
    (matrix, optimal_solution), and the __main__ guard called the undefined
    name ``main``.

    NOTE(review): `matrix_chain_order` and `print_optiomal_solution` must be
    defined at module level (their defs were renamed by the same mangling).
    """
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    snake_case()
| 711
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
    """Tiny Linear -> BatchNorm -> Linear fixture model for the hook tests.

    NOTE(review): the body looks machine-mangled -- the submodules are bound to
    throwaway `lowerCamelCase` locals instead of the `self.lineara`/`self.batchnorm`
    attributes that forward() reads, `nn.BatchNormad` is not a real torch module
    (digit-mangled; presumably BatchNorm1d), and the two distinct linear layers
    appear to have collapsed onto the single name `lineara`. Restore from the
    original accelerate test before relying on it.
    """
    def __init__( self: Dict )-> Dict:
        super().__init__()
        lowerCamelCase : Tuple = nn.Linear(3 , 4 )
        lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
        lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )
    def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
        # Forward pass reads attributes that __init__ (as mangled) never sets.
        return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__(ModelHook):
    """Hook that adds 1 to the model's first positional input before forward.

    Fixes vs. the mangled original: the base class was the undefined name
    ``__lowercase`` (ModelHook is imported at the top of this file and otherwise
    unused), and the method signature had three parameters all named ``__a``
    (a SyntaxError) while the body read ``args``/``kwargs``.

    NOTE(review): the accelerate hook API dispatches to ``pre_forward``; the
    mangled method name ``a__`` would never be called -- confirm against the
    original accelerate test.
    """

    def pre_forward(self, module, *args, **kwargs):
        # Increment only the first positional argument; pass the rest through.
        return (args[0] + 1,) + args[1:], kwargs
class A__(ModelHook):
    """Hook that adds 1 to the model's output after forward.

    Fixes vs. the mangled original: the base class was the undefined name
    ``__lowercase`` (ModelHook is imported at the top of this file), and the
    parameters were both named ``__a`` while the body read ``output``.

    NOTE(review): the accelerate hook API dispatches to ``post_forward``; the
    mangled method name ``a__`` would never be called -- confirm against the
    original accelerate test.
    """

    def post_forward(self, module, output):
        return output + 1
class A__ ( unittest.TestCase):
    """Tests for accelerate's model hooks: attach/detach, pre/post forward, grad control, device alignment and CPU/meta offload.

    NOTE(review): this class looks machine-mangled -- results are assigned to
    throwaway `lowerCamelCase` locals while later lines read `test_model`, `x`,
    `output`/`outputa`, `model` and `hook_kwargs` (never defined), `__a` stands
    in for many distinct arguments, and `ModelForTest`/`PreForwardHook`/
    `PostForwardHook` refer to the classes above whose names were mangled to
    `A__`. Restore from the original accelerate test suite before running.
    """
    # Attaching a hook must preserve forward's name/signature; removing it must clean up.
    def a__ ( self: int )-> str:
        lowerCamelCase : List[str] = ModelForTest()
        lowerCamelCase : Dict = ModelHook()
        add_hook_to_module(__a , __a )
        self.assertEqual(test_model._hf_hook , __a )
        self.assertTrue(hasattr(__a , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__a )
        self.assertFalse(hasattr(__a , """_hf_hook""" ) )
        self.assertFalse(hasattr(__a , """_old_forward""" ) )
    # append=True should wrap both hooks in a SequentialHook.
    def a__ ( self: int )-> str:
        lowerCamelCase : List[str] = ModelForTest()
        lowerCamelCase : Union[str, Any] = ModelHook()
        add_hook_to_module(__a , __a )
        add_hook_to_module(__a , __a , append=__a )
        self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(__a , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__a )
        self.assertFalse(hasattr(__a , """_hf_hook""" ) )
        self.assertFalse(hasattr(__a , """_old_forward""" ) )
    # A pre-forward hook shifts the input; re-adding replaces, SequentialHook chains.
    def a__ ( self: List[Any] )-> List[str]:
        lowerCamelCase : str = ModelForTest()
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : Union[str, Any] = test_model(x + 1 )
        lowerCamelCase : Optional[int] = test_model(x + 2 )
        lowerCamelCase : List[Any] = PreForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[int] = test_model(__a )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCamelCase : Dict = PreForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Tuple = test_model(__a )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[Any] = test_model(__a )
        assert torch.allclose(__a , __a , atol=1e-5 )
    # A post-forward hook shifts the output by the same rules.
    def a__ ( self: Any )-> Optional[int]:
        lowerCamelCase : str = ModelForTest()
        lowerCamelCase : List[str] = torch.randn(2 , 3 )
        lowerCamelCase : int = test_model(__a )
        lowerCamelCase : Dict = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Tuple = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCamelCase : str = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[Any] = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(__a , __a )
        lowerCamelCase : str = test_model(__a )
        assert torch.allclose(__a , output + 2 , atol=1e-5 )
    # no_grad flag on the hook must strip requires_grad from the output.
    def a__ ( self: int )-> Dict:
        lowerCamelCase : List[Any] = ModelForTest()
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
        lowerCamelCase : List[str] = test_model(__a )
        lowerCamelCase : Any = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : str = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        lowerCamelCase : Optional[int] = True
        lowerCamelCase : Optional[int] = test_model(__a )
        self.assertFalse(outputa.requires_grad )
    # AlignDevicesHook should place each submodule on its own GPU and align IO.
    @require_multi_gpu
    def a__ ( self: List[str] )-> Union[str, Any]:
        lowerCamelCase : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        lowerCamelCase : str = torch.randn(2 , 3 )
        lowerCamelCase : Dict = model(__a )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
        lowerCamelCase : str = model(__a )
        self.assertEqual(output.device , torch.device(0 ) )
    # Per-module offload hooks: params go to meta, buffers stay on the execution device.
    def a__ ( self: List[str] )-> Tuple:
        lowerCamelCase : Union[str, Any] = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
        lowerCamelCase : Optional[Any] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        lowerCamelCase : Any = {
            """execution_device""": 0 if torch.cuda.is_available() else """cpu""",
            """offload""": True,
            """offload_buffers""": True,
        }
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : int = torch.randn(2 , 3 )
        lowerCamelCase : Optional[int] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    # attach_align_device_hook over the whole model, default weights map.
    def a__ ( self: Any )-> List[str]:
        lowerCamelCase : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(__a , execution_device=__a , offload=__a )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(__a )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : Optional[Any] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
        lowerCamelCase : int = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    # attach_align_device_hook with an explicit state_dict as the weights map.
    def a__ ( self: Optional[Any] )-> List[Any]:
        lowerCamelCase : List[Any] = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(
            __a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(__a )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : int = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : Tuple = torch.randn(2 , 3 )
        lowerCamelCase : Any = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 42
| 0
|
"""simple docstring"""
from math import pow, sqrt
def snake_case(*values: float) -> bool:
    """Return True when at least one value is given and every value is strictly positive.

    Fixes vs. the mangled original: the varargs parameter was renamed away from
    ``values`` (which the body read) and the boolean was bound to a throwaway
    local while ``return`` read the undefined name ``result``.

    :param values: zero or more numeric values to validate.
    :return: True iff values is non-empty and all values > 0.
    """
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result
# NOTE(review): mangled -- both parameters share the name `UpperCamelCase__`
# (a SyntaxError) and both operands of the division read `molar_mass_a`; the
# two molar masses need distinct names (the intended numerator/denominator
# order cannot be recovered from this text). `validate` must also exist at
# module level.
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
    """Graham's law: effusion-rate ratio of two gases from their molar masses, rounded to 6 places."""
    return (
        round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
        if validate(UpperCamelCase__ , UpperCamelCase__ )
        else ValueError("""Input Error: Molar mass values must greater than 0.""" )
    )
# NOTE(review): mangled -- the two molar-mass parameters share one name
# (SyntaxError) and both division operands read `molar_mass_a`; restore
# distinct names before use.
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
    """Graham's law: first gas's effusion rate from the second gas's rate and both molar masses."""
    return (
        round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
        if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
# NOTE(review): mangled -- duplicate molar-mass parameter names (SyntaxError)
# and identical division operands; restore distinct names before use.
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
    """Graham's law: second gas's effusion rate from the first gas's rate and both molar masses."""
    return (
        round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
        if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
# NOTE(review): mangled -- the two effusion-rate parameters share one name
# (SyntaxError) and both ratio operands read `effusion_rate_a`; restore
# distinct names before use.
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
    """Graham's law: first gas's molar mass from the other molar mass and both effusion rates."""
    return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
        if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
# NOTE(review): mangled -- duplicate effusion-rate parameter names
# (SyntaxError) and identical ratio operands; restore distinct names before use.
def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : float ) -> float | ValueError:
    """Graham's law: second gas's molar mass from the other molar mass and both effusion rates."""
    return (
        round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
        if validate(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        else ValueError(
            """Input Error: Molar mass and effusion rate values must greater than 0.""" )
    )
| 712
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import machinery for the encodec subpackage (standard transformers
# pattern). Fixes vs. the mangled original: the structure dict and the
# torch-only model list were bound to `__lowerCamelCase` while `_LazyModule`
# reads `_import_structure`, and the final module replacement
# (`sys.modules[__name__] = ...`) had been lost.
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply omit the modeling module from the lazy structure.
    pass
else:
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __lowercase , unittest.TestCase):
    """BPE tokenization tests for the CTRL tokenizer.

    NOTE(review): mangled -- the mixin base is the undefined name `__lowercase`
    (TokenizerTesterMixin is imported above and otherwise unused), the
    `snake_case__` class attributes presumably stood for the mixin's
    `tokenizer_class` / flag attributes, and the `a__` method names presumably
    stood for setUp / get_tokenizer / get_input_output_texts /
    test_full_tokenizer. Restore before running under the mixin.
    """
    snake_case__ : Tuple =CTRLTokenizer
    snake_case__ : List[Any] =False
    snake_case__ : Optional[Any] =False
    # Writes a tiny vocab + merges pair to tmpdir for the tokenizer to load.
    def a__ ( self: str )-> List[str]:
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCamelCase : str = ["""adapt""", """re@@""", """a@@""", """apt""", """c@@""", """t""", """<unk>"""]
        lowerCamelCase : int = dict(zip(__a , range(len(__a ) ) ) )
        lowerCamelCase : Union[str, Any] = ["""#version: 0.2""", """a p""", """ap t</w>""", """r e""", """a d""", """ad apt</w>""", """"""]
        lowerCamelCase : Optional[Any] = {"""unk_token""": """<unk>"""}
        lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(__a ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(__a ) )
    # Factory: build a CTRLTokenizer from the files written in setUp.
    def a__ ( self: str , **__a: str )-> Optional[int]:
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **__a )
    # Provides an (input, expected output) text pair for the mixin.
    def a__ ( self: Any , __a: Any )-> Optional[int]:
        lowerCamelCase : Optional[Any] = """adapt react readapt apt"""
        lowerCamelCase : List[Any] = """adapt react readapt apt"""
        return input_text, output_text
    # End-to-end: tokenize and convert tokens to ids against the tiny vocab.
    def a__ ( self: Optional[int] )-> Optional[Any]:
        lowerCamelCase : Any = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCamelCase : Union[str, Any] = """adapt react readapt apt"""
        lowerCamelCase : str = """adapt re@@ a@@ c@@ t re@@ adapt apt""".split()
        lowerCamelCase : Dict = tokenizer.tokenize(__a )
        self.assertListEqual(__a , __a )
        lowerCamelCase : List[str] = tokens + [tokenizer.unk_token]
        lowerCamelCase : Optional[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
| 713
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""simple docstring"""
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
    def a__ ( self: Dict )-> Union[str, Any]:
        """Build a ConvNextConfig from this tester's settings.

        NOTE(review): `is_decoder=__a` references an undefined name (mangling
        artifact) -- confirm the intended keyword/value against the original
        transformers ConvNext test before use.
        """
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Common model tests for ConvNext (base model, classification head and
    backbone), driven by the shared ModelTesterMixin machinery.

    NOTE(review): base-class placeholders (``__lowercase``), every test
    method named ``a__`` and locals bound to throwaway names indicate
    mechanically mangled source; compare with the original test file.
    """
    # Model classes exercised by the generic mixin tests (empty without torch).
    snake_case__ : int =(
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task mapping used by the pipeline model tests.
    snake_case__ : str =(
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    # Capability flags consumed by the common test mixin.
    snake_case__ : Union[str, Any] =True
    snake_case__ : Optional[int] =False
    snake_case__ : Tuple =False
    snake_case__ : Union[str, Any] =False
    snake_case__ : Tuple =False
    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        # Set up the model tester and a config tester (no text modality).
        lowerCamelCase : Tuple = ConvNextModelTester(self )
        lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
    def a__ ( self: Optional[int] )-> Dict:
        # Run the standard configuration round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def a__ ( self: Optional[int] )-> Optional[Any]:
        # Intentionally empty placeholder.
        return
    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def a__ ( self: int )-> Dict:
        pass
    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def a__ ( self: Dict )-> Optional[Any]:
        pass
    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def a__ ( self: int )-> List[Any]:
        pass
    def a__ ( self: Union[str, Any] )-> int:
        # Forward signature of every model class must start with ``pixel_values``.
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : Any = model_class(__a )
            lowerCamelCase : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
            lowerCamelCase : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )
    def a__ ( self: Optional[int] )-> str:
        # Base-model forward-pass shape check.
        lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )
    def a__ ( self: str )-> int:
        # Backbone feature-map/channel checks.
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__a )
    def a__ ( self: int )-> Optional[int]:
        # Hidden-states count and first-stage spatial size (image_size // 4).
        def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
            lowerCamelCase : str = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
            lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(__a ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : List[Any] = True
            check_hidden_states_output(__a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : Tuple = True
            check_hidden_states_output(__a , __a , __a )
    def a__ ( self: Dict )-> Optional[Any]:
        # Image-classification head shape check.
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )
    @slow
    def a__ ( self: Optional[Any] )-> Tuple:
        # Smoke-test loading the first published checkpoint.
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
def snake_case ( ) -> Optional[int]:
    """Load the standard COCO fixture image used by the integration tests.

    Fixes the original body, which stored the opened image in a throwaway
    local and then returned the undefined name ``image`` (NameError at
    call time).
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
@require_vision
class A__ ( unittest.TestCase):
    """Slow integration test: runs facebook/convnext-tiny-224 on a fixture
    image and pins the first three logits against known-good values.
    """
    @cached_property
    def a__ ( self: Dict )-> Union[str, Any]:
        # Image processor for the checkpoint (None when vision deps are missing).
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
    @slow
    def a__ ( self: List[str] )-> Dict:
        # End-to-end classification forward pass on the COCO fixture image.
        lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
        lowerCamelCase : Dict = self.default_image_processor
        lowerCamelCase : Union[str, Any] = prepare_img()
        lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
        # forward pass
        with torch.no_grad():
            lowerCamelCase : Any = model(**__a )
        # verify the logits
        lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , __a )
        lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
    """Backbone-specific checks driven by the shared BackboneTesterMixin.

    NOTE(review): references ``ConvNextModelTester``, which is not defined
    under that name in this (mangled) file, and binds the tester to a
    throwaway local instead of an attribute — verify against upstream.
    """
    # Backbone class under test, its config class, and a mixin flag.
    snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
    snake_case__ : Optional[Any] =ConvNextConfig
    snake_case__ : Optional[Any] =False
    def a__ ( self: List[str] )-> int:
        # Set up the shared model tester for the backbone tests.
        lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowerCamelCase :Any = None
# Module logger for the RemBERT fast tokenizer.
__lowerCamelCase :List[Any] = logging.get_logger(__name__)
# Expected on-disk names for the sentencepiece model and the fast-tokenizer file.
__lowerCamelCase :int = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'}
# Download locations of the pretrained vocab/tokenizer files.
__lowerCamelCase :int = {
    'vocab_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model',
    },
    'tokenizer_file': {
        'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json',
    },
}
# Maximum input length (in tokens) for each pretrained checkpoint.
__lowerCamelCase :Optional[Any] = {
    'google/rembert': 256,
}
# SentencePiece's word-boundary marker character.
__lowerCamelCase :Tuple = '▁'
class A__ ( __lowercase):
    """Fast (HF-tokenizers-backed) RemBERT tokenizer.

    NOTE(review): ``__init__`` binds the option flags to throwaway locals
    referencing names that are not its parameters — apparently mangled
    source; compare with the upstream ``RemBertTokenizerFast``.
    """
    # File-name conventions, pretrained file map, max lengths and slow class.
    snake_case__ : Optional[Any] =VOCAB_FILES_NAMES
    snake_case__ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ : Optional[int] =RemBertTokenizer
    def __init__( self: Union[str, Any] , __a: Tuple=None , __a: Any=None , __a: str=True , __a: List[str]=True , __a: str=False , __a: Tuple="[CLS]" , __a: Optional[Any]="[SEP]" , __a: Union[str, Any]="<unk>" , __a: List[str]="[SEP]" , __a: str="<pad>" , __a: str="[CLS]" , __a: List[str]="[MASK]" , **__a: Dict , )-> Optional[int]:
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase : Dict = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        super().__init__(
            __a , tokenizer_file=__a , do_lower_case=__a , remove_space=__a , keep_accents=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , pad_token=__a , cls_token=__a , mask_token=__a , **__a , )
        lowerCamelCase : Optional[Any] = do_lower_case
        lowerCamelCase : str = remove_space
        lowerCamelCase : Any = keep_accents
        lowerCamelCase : Optional[Any] = vocab_file
        # Saving the slow tokenizer requires the sentencepiece file on disk.
        lowerCamelCase : Optional[int] = False if not self.vocab_file else True
    def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
        # Build ``[CLS] A [SEP]`` (single sequence) or ``[CLS] A [SEP] B [SEP]``.
        lowerCamelCase : Optional[Any] = [self.sep_token_id]
        lowerCamelCase : Dict = [self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep
    def a__ ( self: Optional[Any] , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]:
        # Mask marking special-token positions with 1 and sequence tokens with 0.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    """You should not supply a second sequence if the provided sequence of """
                    """ids is already formatted with special tokens for the model.""" )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(__a )) + [1] + ([0] * len(__a )) + [1]
        return [1] + ([0] * len(__a )) + [1]
    def a__ ( self: Dict , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
        # Segment ids: 0 for the first sequence (incl. specials), 1 for the second.
        lowerCamelCase : Union[str, Any] = [self.sep_token_id]
        lowerCamelCase : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
    def a__ ( self: List[str] , __a: str , __a: Optional[str] = None )-> Tuple[str]:
        # Copy the sentencepiece model into the target directory.
        # NOTE(review): on an invalid directory this returns None rather than a
        # tuple, despite the annotation — confirm against upstream behaviour.
        if not os.path.isdir(__a ):
            logger.error("""Vocabulary path ({}) should be a directory""".format(__a ) )
            return
        lowerCamelCase : Optional[Any] = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ):
            copyfile(self.vocab_file , __a )
        return (out_vocab_file,)
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the REALM configuration.
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
# Map of pretrained REALM checkpoints to their hosted config files.
__lowerCamelCase :List[str] = {
    'google/realm-cc-news-pretrained-embedder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-encoder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-scorer': (
        'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-openqa': (
        # Fixed: URL previously read ".../aresolve/main/..." — a typo for "resolve"
        # that made the download 404.
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
    ),
    'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
    'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
    'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
    'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
    """Configuration class for REALM models (common/reader/retrieval settings).

    NOTE(review): as in the rest of this mangled file, the assignments below
    bind to throwaway locals rather than ``self`` attributes — compare with
    the upstream ``RealmConfig``.
    """
    # Model-type key used by the auto-config machinery.
    snake_case__ : Optional[Any] ='''realm'''
    def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
        # Common config
        lowerCamelCase : Optional[Any] = vocab_size
        lowerCamelCase : str = max_position_embeddings
        lowerCamelCase : Dict = hidden_size
        lowerCamelCase : Dict = retriever_proj_size
        lowerCamelCase : Optional[Any] = num_hidden_layers
        lowerCamelCase : List[str] = num_attention_heads
        lowerCamelCase : Tuple = num_candidates
        lowerCamelCase : int = intermediate_size
        lowerCamelCase : Dict = hidden_act
        lowerCamelCase : List[str] = hidden_dropout_prob
        lowerCamelCase : Dict = attention_probs_dropout_prob
        lowerCamelCase : Optional[int] = initializer_range
        lowerCamelCase : Dict = type_vocab_size
        lowerCamelCase : Optional[Any] = layer_norm_eps
        # Reader config
        lowerCamelCase : List[str] = span_hidden_size
        lowerCamelCase : Dict = max_span_width
        lowerCamelCase : Optional[Any] = reader_layer_norm_eps
        lowerCamelCase : Optional[int] = reader_beam_size
        lowerCamelCase : List[Any] = reader_seq_len
        # Retrieval config
        lowerCamelCase : int = num_block_records
        lowerCamelCase : Dict = searcher_beam_size
| 42
| 0
|
"""simple docstring"""
from math import factorial
class A__ :
    """Dual number ``real + d1*E1 + d2*E2 + ...`` for forward-mode automatic
    differentiation; ``duals[k]`` is the coefficient of the nilpotent unit
    ``E**(k+1)``.

    Rewritten to repair the mangled original, whose methods reused the same
    parameter name twice (a SyntaxError) and read undefined locals.
    """

    def __init__( self , real , rank ):
        # ``rank``: an int seeds that many coefficients with 1 (the standard
        # derivative seed); otherwise it is taken as an explicit list.
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__( self ):
        """Human-readable form, e.g. ``2+1E1``."""
        return (
            f'{self.real}+'
            f'{"+".join(str(dual) + "E" + str(n + 1) for n, dual in enumerate(self.duals))}'
        )

    def a__ ( self ):
        """Drop trailing zero coefficients and return the reduced dual."""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return A__(self.real, cur)

    def __add__( self , other ):
        if not isinstance(other, A__):
            # Scalar addition only shifts the real part.
            return A__(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Pad the shorter coefficient list. The visible original padded with
        # 1s; kept as-is to preserve behaviour — mathematically 0s would be
        # expected here, TODO confirm against upstream.
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = [s + o for s, o in zip(s_dual, o_dual)]
        return A__(self.real + other.real, new_duals)

    snake_case__ = __add__

    def __sub__( self , other ):
        return self + other * -1

    def __mul__( self , other ):
        if not isinstance(other, A__):
            # Scalar multiplication scales the real part and every coefficient.
            return A__(self.real * other, [d * other for d in self.duals])
        # Convolution-style product of coefficient lists; indices are shifted
        # by one because duals[k] is the coefficient of E**(k+1).
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return A__(self.real * other.real, new_duals)

    snake_case__ = __mul__

    def __truediv__( self , other ):
        # Only division by a plain scalar is supported.
        if not isinstance(other, A__):
            return A__(self.real / other, [d / other for d in self.duals])
        raise ValueError

    def __floordiv__( self , other ):
        # Only floor-division by a plain scalar is supported.
        if not isinstance(other, A__):
            return A__(self.real // other, [d // other for d in self.duals])
        raise ValueError

    def __pow__( self , n ):
        """Integer power by repeated multiplication; n must be a positive int."""
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def snake_case ( func , position , order ):
    """Return the ``order``-th derivative of ``func`` at ``position`` using
    dual-number forward-mode differentiation.

    Raises ValueError for a non-callable ``func``, a non-numeric ``position``
    or a non-int ``order``. (Rewritten: the mangled original repeated one
    parameter name three times — a SyntaxError — and read undefined locals.)
    """
    if not callable(func):
        raise ValueError("""differentiate() requires a function as input for func""")
    if not isinstance(position, (float, int)):
        raise ValueError("""differentiate() requires a float as input for position""")
    if not isinstance(order, int):
        raise ValueError("""differentiate() requires an int as input for order""")
    # Seed one dual coefficient; after evaluation duals[k-1] = f^(k)(pos) / k!.
    result = func(A__(position, 1))
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Demo: f(y) = y**6, so the second derivative at y = 9 is 30 * 9**4.
    # Renamed from ``snake_case`` so the demo no longer clobbers the
    # differentiation function above, and the undefined names
    # ``differentiate``/``f`` are replaced by the actual definitions.
    def _demo(y):
        return y**2 * y**4

    print(snake_case(_demo, 9, 2))
| 715
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for the GLPN configuration.
__lowerCamelCase :Tuple = logging.get_logger(__name__)
# Map of pretrained GLPN checkpoints to their hosted config files.
__lowerCamelCase :Any = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
    """Configuration class for GLPN depth-estimation models.

    NOTE(review): assignments bind to throwaway locals instead of ``self``
    attributes — apparently mangled source; compare with upstream
    ``GLPNConfig``.
    """
    # Model-type key used by the auto-config machinery.
    snake_case__ : Tuple ='''glpn'''
    def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
        super().__init__(**__a )
        # Encoder / decoder hyper-parameters (per-stage lists follow the
        # SegFormer-style hierarchical encoder layout).
        lowerCamelCase : Dict = num_channels
        lowerCamelCase : Any = num_encoder_blocks
        lowerCamelCase : Dict = depths
        lowerCamelCase : List[str] = sr_ratios
        lowerCamelCase : Dict = hidden_sizes
        lowerCamelCase : Tuple = patch_sizes
        lowerCamelCase : Optional[int] = strides
        lowerCamelCase : Optional[Any] = mlp_ratios
        lowerCamelCase : Union[str, Any] = num_attention_heads
        lowerCamelCase : List[str] = hidden_act
        lowerCamelCase : Any = hidden_dropout_prob
        lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase : List[Any] = initializer_range
        lowerCamelCase : Dict = drop_path_rate
        lowerCamelCase : Any = layer_norm_eps
        lowerCamelCase : Optional[Any] = decoder_hidden_size
        lowerCamelCase : Tuple = max_depth
        lowerCamelCase : Optional[Any] = head_in_index
| 42
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
# Source tree to scan and repository root.
# NOTE(review): later code reads DIFFUSERS_PATH/spec, but these constants are
# bound to ``__lowerCamelCase`` names — apparently mangled source.
__lowerCamelCase :Any = 'src/diffusers'
__lowerCamelCase :List[str] = '.'
# This is to make sure the diffusers module imported is the one in the repo.
__lowerCamelCase :Optional[int] = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
__lowerCamelCase :int = spec.loader.load_module()
def snake_case ( line: str , indent: str ) -> bool:
    """Return True while ``line`` still belongs to the definition body that
    starts at indentation ``indent``: it is more indented, effectively empty,
    or the closing ``)`` of a signature.

    Rewritten: the mangled original declared the same parameter name twice
    (a SyntaxError) and referenced the undefined name ``line``.
    """
    return line.startswith(indent) or len(line) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line) is not None
def snake_case ( UpperCamelCase__ : Any ) -> Optional[int]:
    """Locate ``object_name`` (``module.Class`` or ``module.func``) inside the
    diffusers source tree and return its source text.

    NOTE(review): the body binds intermediates to throwaway names and then
    reads undefined names (``parts``, ``i``, ``module``, ``lines``,
    ``line_index``, ``start_index``) — mangled source; compare with the
    upstream ``find_code_in_diffusers`` in utils/check_copies.py.
    """
    lowerCamelCase : Dict = object_name.split(""".""" )
    lowerCamelCase : Union[str, Any] = 0
    # First let's find the module where our object lives.
    lowerCamelCase : Dict = parts[i]
    while i < len(UpperCamelCase__ ) and not os.path.isfile(os.path.join(UpperCamelCase__ , F'{module}.py' ) ):
        i += 1
        if i < len(UpperCamelCase__ ):
            lowerCamelCase : Any = os.path.join(UpperCamelCase__ , parts[i] )
    if i >= len(UpperCamelCase__ ):
        raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
    with open(os.path.join(UpperCamelCase__ , F'{module}.py' ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowerCamelCase : Optional[int] = f.readlines()
    # Now let's find the class / func in the code!
    lowerCamelCase : List[Any] = """"""
    lowerCamelCase : Union[str, Any] = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(UpperCamelCase__ ) and re.search(RF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(UpperCamelCase__ ):
        raise ValueError(F' {object_name} does not match any function or class in {module}.' )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    lowerCamelCase : str = line_index
    while line_index < len(UpperCamelCase__ ) and _should_continue(lines[line_index] , UpperCamelCase__ ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    lowerCamelCase : Optional[Any] = lines[start_index:line_index]
    return "".join(UpperCamelCase__ )
# Matches "# Copied from diffusers.<module>.<object>" markers (optionally
# followed by replacement instructions).
__lowerCamelCase :Any = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
# Matches an "old->new" replacement clause in the marker's trailing text.
__lowerCamelCase :int = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
# Matches "<FILL ...>" placeholders.
__lowerCamelCase :List[Any] = re.compile(r'<FILL\s+[^>]*>')
def snake_case ( UpperCamelCase__ : Any ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = code.split("""\n""" )
lowerCamelCase : Any = 0
while idx < len(UpperCamelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCamelCase__ ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def snake_case ( UpperCamelCase__ : int ) -> Optional[int]:
    """Format a code snippet with black (wrapping indented snippets in a dummy
    class so they parse), then restyle its docstrings.

    NOTE(review): reads undefined names (``get_indent``, ``code``,
    ``has_indent``, ``mode``, ``result``) and ``black.TargetVersion.PYaa`` —
    mangled source; compare with upstream ``blackify``.
    """
    lowerCamelCase : Any = len(get_indent(UpperCamelCase__ ) ) > 0
    if has_indent:
        lowerCamelCase : int = F'class Bla:\n{code}'
    lowerCamelCase : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=UpperCamelCase__ )
    lowerCamelCase : Optional[int] = black.format_str(UpperCamelCase__ , mode=UpperCamelCase__ )
    lowerCamelCase : Dict = style_docstrings_in_code(UpperCamelCase__ )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False ) -> List[str]:
    """Check (and with ``overwrite`` fix) every ``# Copied from`` block in one
    file against the original it references; return the list of mismatches.

    NOTE(review): like the rest of this file, intermediates are bound to
    throwaway names while later lines read the upstream variable names
    (``search``, ``diffs``, ``line_index``, ``theoretical_code``, ...) —
    mangled source; compare with upstream ``is_copy_consistent``.
    """
    with open(UpperCamelCase__ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lowerCamelCase : List[str] = f.readlines()
    lowerCamelCase : List[str] = []
    lowerCamelCase : Any = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(UpperCamelCase__ ):
        lowerCamelCase : Optional[Any] = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        lowerCamelCase : List[Any] = search.groups()
        lowerCamelCase : Union[str, Any] = find_code_in_diffusers(UpperCamelCase__ )
        lowerCamelCase : str = get_indent(UpperCamelCase__ )
        lowerCamelCase : Dict = line_index + 1 if indent == theoretical_indent else line_index + 2
        lowerCamelCase : Optional[Any] = theoretical_indent
        lowerCamelCase : int = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        lowerCamelCase : Union[str, Any] = True
        while line_index < len(UpperCamelCase__ ) and should_continue:
            line_index += 1
            if line_index >= len(UpperCamelCase__ ):
                break
            lowerCamelCase : Any = lines[line_index]
            lowerCamelCase : Any = _should_continue(UpperCamelCase__ , UpperCamelCase__ ) and re.search(F'^{indent}# End copy' , UpperCamelCase__ ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1
        lowerCamelCase : Any = lines[start_index:line_index]
        lowerCamelCase : Any = """""".join(UpperCamelCase__ )
        # Remove any nested `Copied from` comments to avoid circular copies
        lowerCamelCase : Any = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCamelCase__ ) is None]
        lowerCamelCase : Dict = """\n""".join(UpperCamelCase__ )
        # Before comparing, use the `replace_pattern` on the original code.
        if len(UpperCamelCase__ ) > 0:
            lowerCamelCase : Union[str, Any] = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            lowerCamelCase : Dict = [_re_replace_pattern.search(UpperCamelCase__ ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                lowerCamelCase : Optional[int] = pattern.groups()
                lowerCamelCase : List[str] = re.sub(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
                if option.strip() == "all-casing":
                    lowerCamelCase : List[Any] = re.sub(obja.lower() , obja.lower() , UpperCamelCase__ )
                    lowerCamelCase : List[Any] = re.sub(obja.upper() , obja.upper() , UpperCamelCase__ )
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            lowerCamelCase : Optional[int] = blackify(lines[start_index - 1] + theoretical_code )
            lowerCamelCase : int = theoretical_code[len(lines[start_index - 1] ) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lowerCamelCase : Optional[Any] = lines[:start_index] + [theoretical_code] + lines[line_index:]
                lowerCamelCase : List[str] = start_index + 1
    if overwrite and len(UpperCamelCase__ ) > 0:
        # Warn the user a file has been modified.
        print(F'Detected changes, rewriting (unknown).' )
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(UpperCamelCase__ )
    return diffs
def snake_case ( UpperCamelCase__ : bool = False ) -> Tuple:
    """Run the copy-consistency check over every ``*.py`` file in the source
    tree; raise with a summary when inconsistencies are found and
    ``overwrite`` is False.

    NOTE(review): reads undefined names (``all_files``, ``new_diffs``,
    ``diff``) — mangled source; compare with upstream ``check_copies``.
    """
    lowerCamelCase : int = glob.glob(os.path.join(UpperCamelCase__ , """**/*.py""" ) , recursive=UpperCamelCase__ )
    lowerCamelCase : List[str] = []
    for filename in all_files:
        lowerCamelCase : Any = is_copy_consistent(UpperCamelCase__ , UpperCamelCase__ )
        diffs += [F'- (unknown): copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
    if not overwrite and len(UpperCamelCase__ ) > 0:
        lowerCamelCase : Tuple = """\n""".join(UpperCamelCase__ )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    # CLI entry point: --fix_and_overwrite rewrites out-of-date copies in place.
    # NOTE(review): ``parser`` and ``check_copies`` are not bound under those
    # names in this mangled file — compare with upstream.
    __lowerCamelCase :Optional[int] = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    __lowerCamelCase :str = parser.parse_args()
    check_copies(args.fix_and_overwrite)
| 716
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( u: float , p: int ) -> float:
    """Return the falling product ``u*(u-1)*...*(u-p+1)`` used by the Newton
    forward-difference interpolation formula (``u`` for ``p == 1`` or less).

    Rewritten: the mangled original declared the same parameter name twice
    (a SyntaxError) and read undefined locals.
    """
    temp = u
    for i in range(1 , p ):
        temp = temp * (u - i)
    return temp
def snake_case ( ) -> None:
    """Interactive Newton forward-difference interpolation: read sample
    points from stdin, build the difference table, and print the value
    interpolated at the requested position.

    NOTE(review): intermediates are bound to throwaway names while later
    lines read the upstream variable names (``y``, ``x``, ``n``, ``value``,
    ``u``, ``summ``) — mangled source; compare with the original
    TheAlgorithms implementation.
    """
    lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) )
    lowerCamelCase : list[list[float]] = []
    for _ in range(UpperCamelCase__ ):
        y.append([] )
    for i in range(UpperCamelCase__ ):
        for j in range(UpperCamelCase__ ):
            y[i].append(UpperCamelCase__ )
            lowerCamelCase : Union[str, Any] = 0
    print("""enter the values of parameters in a list: """ )
    lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(UpperCamelCase__ ):
        lowerCamelCase : int = float(input() )
    lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) )
    lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , UpperCamelCase__ ):
        for j in range(n - i ):
            lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1]
    lowerCamelCase : Any = y[0][0]
    for i in range(1 , UpperCamelCase__ ):
        summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
    # Interactive entry point: prompts for data and prints the interpolated value.
    main()
| 42
| 0
|
"""simple docstring"""
import numpy as np
def snake_case ( vector: np.ndarray , alpha: float ) -> np.ndarray:
    """Element-wise ELU activation: ``x`` for ``x > 0``, ``alpha*(exp(x)-1)``
    otherwise.

    Rewritten: the mangled original declared the same parameter name twice
    (a SyntaxError) and referenced the undefined names ``vector``/``alpha``.
    """
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
    # Run the doctests embedded in this module.
    import doctest

    doctest.testmod()
| 717
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-module import structure: the tokenizer is only importable when
# sentencepiece is installed.
# NOTE(review): later lines read ``_import_structure`` but the dict is bound
# to ``__lowerCamelCase``, and the imported names read ``gpt_swa``/``GPTSwa``
# — mangled source; compare with the upstream gpt_sw3 __init__.
__lowerCamelCase :str = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_swa import GPTSwaTokenizer
else:
    import sys
    __lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from collections import defaultdict
class A__ :
    """Count the ways to assign N tasks to M persons (each person at most one
    task) with bitmask dynamic programming over the set of assigned persons.

    NOTE(review): ``__init__`` binds ``total`` and the final mask to
    throwaway locals while other methods read ``self.total_tasks`` /
    ``self.final_mask`` — mangled source; compare with the original
    TheAlgorithms implementation.
    """
    def __init__( self: int , __a: Tuple , __a: Dict )-> Optional[int]:
        lowerCamelCase : Union[str, Any] = total # total no of tasks (N)
        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        lowerCamelCase : int = [
            [-1 for i in range(total + 1 )] for j in range(2 ** len(__a ) )
        ]
        lowerCamelCase : Optional[Any] = defaultdict(__a ) # stores the list of persons for each task
        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        lowerCamelCase : Dict = (1 << len(__a )) - 1
    def a__ ( self: Union[str, Any] , __a: str , __a: List[Any] )-> List[str]:
        # Memoised recursion: number of complete assignments given that persons
        # in ``mask`` are already busy and tasks < ``task_no`` are decided.
        # if mask == self.finalmask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]
        # Number of ways when we don't this task in the arrangement
        lowerCamelCase : Dict = self.count_ways_until(__a , task_no + 1 )
        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue
                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
        # save the value.
        lowerCamelCase : Union[str, Any] = total_ways_util
        return self.dp[mask][task_no]
    def a__ ( self: Tuple , __a: List[str] )-> Union[str, Any]:
        # Public entry point: index tasks by the persons able to do them, then
        # run the DP starting from an empty mask and task 1.
        # Store the list of persons for each task
        for i in range(len(__a ) ):
            for j in task_performed[i]:
                self.task[j].append(__a )
        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
    # Demo: 5 tasks, 3 persons with their allowed task lists.
    # NOTE(review): calls ``AssignmentUsingBitmask``/``count_no_of_ways``,
    # names not defined in this mangled file — compare with upstream.
    __lowerCamelCase :Optional[Any] = 5 # total no of tasks (the value of N)
    # the list of tasks that can be done by M persons.
    __lowerCamelCase :List[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 718
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def convert_weight_and_push(
    hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True
):
    """Port one pretrained timm LeViT checkpoint into a
    `LevitForImageClassificationWithTeacher` and save it (plus an image
    processor) under ``save_directory / name``.

    Raises ValueError for an unsupported ``hidden_sizes`` (previously this
    crashed later with a NameError on ``from_model``).
    """
    print(f'Converting {name}...')
    with torch.no_grad():
        # Pick the matching timm reference model (pretrained weights).
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        elif hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        elif hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        elif hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        else:
            raise ValueError(f"Unsupported hidden_sizes: {hidden_sizes}")

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Copy tensors positionally: both state dicts enumerate parameters in
        # the same order even though the key names differ.
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        # Sanity check: both models must produce identical logits.
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one LeViT checkpoint (or all known ones when ``model_name`` is
    None) and save under ``save_directory``.

    Returns ``(config, expected_shape)`` of the last converted model. Local
    names restored from read-sites (`ImageNetPreTrainedConfig` was previously
    never bound -> NameError); `config` is now also bound on the
    single-model branch before the final return.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Config factory with the ImageNet label maps baked in.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    # CLI entry: assignment targets (`parser`, `args`,
    # `pytorch_dump_folder_path`) restored from read-sites.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 0
|
"""simple docstring"""
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print ``msgs`` while holding an exclusive `flock` on this source file,
    so that output from concurrent ranks does not interleave.

    The obfuscated original opened the args tuple itself (TypeError); the lock
    file is this module's own file, as in the upstream script.
    """
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            # Always release the lock, even if print raises.
            fcntl.flock(fh, fcntl.LOCK_UN)
# Multi-GPU/NCCL sanity check: every rank joins the process group, runs a
# collective, and reports per-GPU OK/broken. Assignment targets (`local_rank`,
# `device`, `hostname`, `gpu`, `rank`, `world_size`) restored from read-sites.
local_rank = int(os.environ['LOCAL_RANK'])
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group('nccl')
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")
except Exception:
    printflock(f"{gpu} is broken")
    raise
| 719
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class KDPMaDiscreteSchedulerTest(SchedulerCommonTest):
    """Unit tests for `KDPMaDiscreteScheduler`.

    NOTE(review): class/attribute/method names restored from the obfuscated
    originals (`A__`, `snake_case__`, `a__`) based on the read-sites
    (`self.scheduler_classes`, `self.num_inference_steps`,
    `self.get_scheduler_config`) and the imported base `SchedulerCommonTest`.
    """

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler kwargs, overridable per test via ``kwargs``."""
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.00_01,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01], [0.00_02, 0.0_02, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4e-0_7) < 1e-2
            assert abs(result_mean.item() - 6.1_1_1_2e-1_0) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7) < 1e-2
            assert abs(result_mean.item() - 0.00_02) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
    """Return a nontrivial divisor of ``num`` via Pollard's rho, or None.

    ``seed``/``step`` seed the pseudorandom function f(x) = (x**2 + step) % num;
    up to ``attempts`` retries with fresh seed/step before giving up.
    Raises ValueError when ``num`` < 2. (Name restored to match the call site;
    the algorithm itself is unchanged.)
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # The algorithm struggles with factors divisible by two, so even inputs
    # are answered directly.
    if num > 2 and num % 2 == 0:
        return 2

    def rand_fn(value: int, step: int, modulus: int) -> int:
        # Pseudorandom f(x) = (x**2 + step) % modulus; `step` varies per attempt.
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # Tortoise and hare positions for Floyd cycle detection.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Inside the cycle, the position difference shares a common
            # divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # This attempt failed: reseed deterministically (Brent's variant uses
        # the hare's position) and perturb the random function.
        seed = hare
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None
if __name__ == "__main__":
    import argparse

    # Assignment targets (`parser`, `args`, `divisor`, `quotient`) restored
    # from read-sites.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'num',
        type=int,
        help='The value to find a divisor of',
    )
    parser.add_argument(
        '--attempts',
        type=int,
        default=3,
        help='The number of attempts before giving up',
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
| 720
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the Stable Diffusion XL img2img pipeline.

    NOTE(review): class/attribute/method names restored from the obfuscated
    originals (`A__`, `snake_case__`, `a__`); the mixin bases come from the
    imports above. Renaming also removes the module-level collision between
    the two classes both previously named `A__`.
    """

    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build tiny randomly-initialized pipeline components (seeded for determinism)."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_00_85,
            beta_end=0.0_12,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-0_5,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        # NOTE(review): local_files_only value was obfuscated — False assumed; confirm.
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=False)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Seeded img2img call kwargs for ``device``."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map [-1, 1] noise into the [0, 1] image range
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # Intentionally a no-op in the original.
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)  # duplicate .to() kept from the original
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionSlowTests(unittest.TestCase):
    """Slow GPU integration test against `stabilityai/stable-diffusion-2-base`.

    NOTE(review): class/method names restored from the obfuscated originals;
    renaming also removes the collision with the fast-test class above.
    """

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        # NOTE(review): the dtype default was obfuscated (`torch.floataa`,
        # which is not a real attribute) — float32 assumed; confirm.
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_default(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
| 42
| 0
|
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
# Optional dependency probe: the flag is read as `_has_cookiecutter` in
# AddNewModelCommand.run(), but the obfuscated original bound it to a throwaway
# name (NameError at runtime). `logger` name restored likewise.
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    """Build an `AddNewModelCommand` from parsed CLI args (installed as the
    ``func=`` default of the `add-new-model` subparser)."""
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    """`transformers-cli add-new-model`: scaffold a new model folder from the
    cookiecutter templates (deprecated in favour of `add-new-model-like`).

    NOTE(review): class/method/local names restored from the obfuscated
    originals (`A__`, `__lowercase`, `a__`, `lowerCamelCase`) based on the
    read-sites visible in this file.
    """

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        # Wire the `add-new-model` subparser and its flags.
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes.")
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing            # run cookiecutter non-interactively from a config file
        self._testing_file = testing_file  # JSON file driving the non-interactive run
        self._path = path                  # template-location override (tests only)

    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead.")
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n")
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory.")
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f'{directory}/configuration.json')

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}', exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py', "w"):
            pass

        shutil.move(
            f'{directory}/__init__.py', f'{model_dir}/__init__.py', )
        shutil.move(
            f'{directory}/configuration_{lowercase_model_name}.py', f'{model_dir}/configuration_{lowercase_model_name}.py', )

        def remove_copy_lines(path):
            # Strip "# Copied from transformers." markers from generated files.
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)

        if output_pytorch:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_{lowercase_model_name}.py')
            shutil.move(
                f'{directory}/modeling_{lowercase_model_name}.py', f'{model_dir}/modeling_{lowercase_model_name}.py', )
            shutil.move(
                f'{directory}/test_modeling_{lowercase_model_name}.py', f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py', )
        else:
            os.remove(f'{directory}/modeling_{lowercase_model_name}.py')
            os.remove(f'{directory}/test_modeling_{lowercase_model_name}.py')

        if output_tensorflow:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_tf_{lowercase_model_name}.py')
            shutil.move(
                f'{directory}/modeling_tf_{lowercase_model_name}.py', f'{model_dir}/modeling_tf_{lowercase_model_name}.py', )
            shutil.move(
                f'{directory}/test_modeling_tf_{lowercase_model_name}.py', f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py', )
        else:
            os.remove(f'{directory}/modeling_tf_{lowercase_model_name}.py')
            os.remove(f'{directory}/test_modeling_tf_{lowercase_model_name}.py')

        if output_flax:
            if not self._testing:
                remove_copy_lines(f'{directory}/modeling_flax_{lowercase_model_name}.py')
            shutil.move(
                f'{directory}/modeling_flax_{lowercase_model_name}.py', f'{model_dir}/modeling_flax_{lowercase_model_name}.py', )
            shutil.move(
                f'{directory}/test_modeling_flax_{lowercase_model_name}.py', f'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py', )
        else:
            os.remove(f'{directory}/modeling_flax_{lowercase_model_name}.py')
            os.remove(f'{directory}/test_modeling_flax_{lowercase_model_name}.py')

        shutil.move(
            f'{directory}/{lowercase_model_name}.md', f'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md', )
        shutil.move(
            f'{directory}/tokenization_{lowercase_model_name}.py', f'{model_dir}/tokenization_{lowercase_model_name}.py', )
        shutil.move(
            f'{directory}/tokenization_fast_{lowercase_model_name}.py', f'{model_dir}/tokenization_{lowercase_model_name}_fast.py', )

        from os import fdopen, remove
        from shutil import copymode, move
        from tempfile import mkstemp

        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.')
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)

        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
| 721
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Stub module whose forward takes the BERT inputs in contiguous order and
    returns None (signature-only fixture for the ONNX export-argument tests).
    The obfuscated original had three parameters all named ``__a`` — a
    SyntaxError; names restored per the upstream fixture.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Stub module with an extra positional arg between the BERT inputs,
    breaking contiguity (signature-only fixture for the ONNX export tests).
    Duplicate ``__a`` parameter names (SyntaxError) restored per upstream.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class A__ ( unittest.TestCase):
    """Regression tests for the graph -> ONNX conversion helpers.

    NOTE(review): this class looks mechanically renamed — method bodies
    reference names that are never defined here (``__a``,
    ``OnnxExportTestCase.MODEL_TO_TEST``), several helper signatures declare
    duplicate ``__a`` parameters (a SyntaxError), and every test method shares
    the name ``a__`` so later definitions shadow earlier ones. Restore the
    original identifiers before relying on these tests.
    """

    # (model_name, model_kwargs) pairs exercised by the export tests.
    snake_case__ : Optional[Any] =[
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> int:
        # Export every reference model through the TensorFlow path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """tf""" , 12 , **__a )

    @require_torch
    @slow
    def a__ ( self: str )-> int:
        # Export every reference model through the PyTorch path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """pt""" , 12 , **__a )

    @require_torch
    @slow
    def a__ ( self: Union[str, Any] )-> Dict:
        # Export a freshly-built (never saved) BERT model with a toy vocabulary.
        from transformers import BertModel

        lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__a ) )
            vocab_file.flush()
            lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
            model.save_pretrained(__a )
            self._test_export(__a , """pt""" , 12 , __a )

    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> Optional[int]:
        # TF export followed by dynamic quantization; the quantized file must shrink.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
            lowerCamelCase : Tuple = quantize(Path(__a ) )

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    @require_torch
    @slow
    def a__ ( self: Any )-> Optional[int]:
        # PyTorch export followed by dynamic quantization; the quantized file must shrink.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
            lowerCamelCase : Dict = quantize(__a )

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
        # Shared helper: run ``convert`` into a temporary directory and return
        # the resulting ONNX path; any failure fails the calling test.
        # NOTE(review): the duplicate ``__a`` parameter names are a SyntaxError.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(__a , __a , __a , __a , __a , **__a )

            return path
        except Exception as e:
            self.fail(__a )

    @require_torch
    @require_tokenizers
    @slow
    def a__ ( self: Tuple )-> Dict:
        # Dynamic-axis inference for a tiny PyTorch BERT.
        from transformers import BertModel

        lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """pt""" )

    @require_tf
    @require_tokenizers
    @slow
    def a__ ( self: Optional[Any] )-> List[Any]:
        # Dynamic-axis inference for a tiny TensorFlow BERT.
        from transformers import TFBertModel

        lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """tf""" )

    def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
        # Shared helper: check ``infer_shapes`` variable names and dynamic axes.
        # NOTE(review): duplicate ``__a`` parameter names are a SyntaxError.
        lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )

        lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )

        # Assert all variables are present
        self.assertEqual(len(__a ) , len(__a ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , __a )
        self.assertSequenceEqual(variable_names[3:] , __a )

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )

    def a__ ( self: List[Any] )-> int:
        # ``ensure_valid_input``: ordering/filtering of kwargs against forward()
        # signatures of the two stub classes defined at the top of this file.
        lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__a ) , 3 )

        # Should have exactly the same input names
        self.assertEqual(set(__a ) , set(__a ) )

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__a ) , 1 )
        self.assertEqual(len(__a ) , 1 )

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )

    def a__ ( self: Tuple )-> Tuple:
        # generate_identified_filename must insert the suffix before the extension.
        lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 0
|
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class A__ :
    """Fallback stand-in for ``PIL.Image`` used when vision extras are absent.

    Its only method accepts anything and returns nothing, so importing this
    module keeps working without Pillow; tests that really need images are
    gated by ``require_vision``.
    """

    @staticmethod
    def a__ ( *__a , **__kwargs ):
        # Fixes vs. original: ``__a`` was used for both *args and **kwargs —
        # a SyntaxError (duplicate argument name) — and the annotations
        # referenced ``typing`` names this file never imports.
        pass
def snake_case ( UpperCamelCase__ : "Image" ) -> str:
    """Return the hex MD5 digest of an image's raw pixel bytes.

    Used below to fingerprint pipeline output images deterministically.
    Accepts any object exposing ``tobytes()``.

    Fixes vs. original: ``hashlib.mda`` -> ``hashlib.md5`` (no such attribute
    exists), and the body read undefined names (``image``, ``m``) instead of
    the parameter and the local it had just bound.
    """
    m = hashlib.md5(UpperCamelCase__.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase):
    """Pipeline tests for depth estimation.

    NOTE(review): this class looks mechanically renamed — bodies reference
    names that don't exist here (``__a``, ``depth_estimator`` after the result
    was bound to a throwaway local, and the module-level ``hashimage`` which is
    now named ``snake_case``), and one signature declares duplicate ``__a``
    parameters (a SyntaxError). Restore the original identifiers before use.
    """

    # Model mapping consumed by the shared pipeline-test machinery.
    snake_case__ : Optional[int] =MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def a__ ( self: Optional[Any] , __a: Tuple , __a: Optional[int] , __a: Tuple )-> Union[str, Any]:
        # Build the pipeline plus two sample images for the shared harness.
        lowerCamelCase : List[Any] = DepthEstimationPipeline(model=__a , image_processor=__a )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def a__ ( self: Any , __a: List[str] , __a: List[str] )-> Optional[int]:
        # Run the pipeline on a single path and on a batch mixing local paths,
        # a URL, and RGBA/LA/L dataset images; each output must contain a
        # tensor depth map plus a PIL rendering.
        lowerCamelCase : Union[str, Any] = depth_estimator("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        self.assertEqual({"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )} , __a )
        import datasets

        lowerCamelCase : Union[str, Any] = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""" )
        lowerCamelCase : Optional[int] = depth_estimator(
            [
                Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ),
                """http://images.cocodataset.org/val2017/000000039769.jpg""",
                # RGBA
                dataset[0]["""file"""],
                # LA
                dataset[1]["""file"""],
                # L
                dataset[2]["""file"""],
            ] )
        self.assertEqual(
            [
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
                {"""predicted_depth""": ANY(torch.Tensor ), """depth""": ANY(Image.Image )},
            ] , __a , )

    @require_tf
    @unittest.skip("""Depth estimation is not implemented in TF""" )
    def a__ ( self: Optional[Any] )-> List[str]:
        pass

    @slow
    @require_torch
    def a__ ( self: Union[str, Any] )-> Optional[Any]:
        # Slow integration test against the Intel/dpt-large checkpoint.
        lowerCamelCase : str = """Intel/dpt-large"""
        lowerCamelCase : List[Any] = pipeline("""depth-estimation""" , model=__a )
        lowerCamelCase : List[str] = depth_estimator("""http://images.cocodataset.org/val2017/000000039769.jpg""" )
        lowerCamelCase : Dict = hashimage(outputs["""depth"""] )

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["""predicted_depth"""].max().item() ) , 29.3_04 )
        self.assertEqual(nested_simplify(outputs["""predicted_depth"""].min().item() ) , 2.6_62 )

    @require_torch
    def a__ ( self: Optional[Any] )-> List[str]:
        # This is highly irregular to have no small tests.
        self.skipTest("""There is not hf-internal-testing tiny model for either GLPN nor DPT""" )
| 700
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
def a__ ( self: str )-> str:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
__a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
unittest.main()
| 42
| 0
|
"""simple docstring"""
from collections import namedtuple

# Each table entry stores (factor into cubic metres, factor out of cubic metres).
# Fix: the original bound both the namedtuple and the table to a throwaway
# name, so ``from_to`` and ``METRIC_CONVERSION`` (read below) were undefined.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    # NOTE(review): 0.00_454 matches an imperial gallon while 264.172 matches
    # US gallons per cubic metre — confirm which gallon is intended.
    "gallon": from_to(0.00_454, 264.172),
    "cubicyard": from_to(0.76_455, 1.30_795),
    "cubicfoot": from_to(0.028, 35.3_147),
    "cup": from_to(0.000_236_588, 4_226.75),
}


def snake_case(value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between volume units by routing through cubic metres.

    :param value: quantity expressed in ``from_type`` units
    :param from_type: source unit; must be a key of ``METRIC_CONVERSION``
    :param to_type: target unit; must be a key of ``METRIC_CONVERSION``
    :return: the quantity expressed in ``to_type`` units
    :raises ValueError: if either unit name is unknown

    Fixes vs. original: the three parameters were all named ``UpperCamelCase__``
    (a SyntaxError) while the body read ``value``/``from_type``/``to_type``,
    and the error messages joined the (float) value instead of the supported
    unit names.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + """, """.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + """, """.join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Fix: the original bound this dict (and the optional extensions below) to
# throwaway names, so the ``_import_structure`` consumed by ``_LazyModule``
# was never defined.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

# Vision-only members are registered only when the vision extras are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

# Torch-only members.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )

else:
    import sys

    # ...while at runtime the module is replaced by a lazy proxy.
    # Fix: the lazy module must be installed into ``sys.modules`` — the
    # original assigned it to a dummy variable, which has no effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import _LazyModule


# Lazy-import structure: maps submodule name -> public names it exports.
# Fix: the original bound this dict to a throwaway name, so the
# ``_import_structure`` consumed by ``_LazyModule`` was never defined.
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    # Fix: module path and class name now match the structure above (the
    # original imported a misspelled module/class pair).
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    # Fix: install the lazy proxy into ``sys.modules`` instead of binding it
    # to a throwaway variable, which has no effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Helper that builds tiny FocalNet configs/inputs and runs shape checks.

    NOTE(review): this class looks mechanically renamed — every ``__init__``
    parameter is declared as ``__a`` (duplicate argument names are a
    SyntaxError) while the body reads the original argument names
    (``parent``, ``batch_size``, ...), and every local is bound to
    ``lowerCamelCase`` while later statements read the original names.
    Restore the original identifiers before use.
    """

    def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
        # Store the tiny-model hyperparameters used by the checks below.
        lowerCamelCase : Dict = parent
        lowerCamelCase : Optional[Any] = batch_size
        lowerCamelCase : Union[str, Any] = image_size
        lowerCamelCase : Optional[int] = patch_size
        lowerCamelCase : Any = num_channels
        lowerCamelCase : Any = embed_dim
        lowerCamelCase : Dict = hidden_sizes
        lowerCamelCase : List[Any] = depths
        lowerCamelCase : Tuple = num_heads
        lowerCamelCase : List[Any] = window_size
        lowerCamelCase : str = mlp_ratio
        lowerCamelCase : str = qkv_bias
        lowerCamelCase : str = hidden_dropout_prob
        lowerCamelCase : Dict = attention_probs_dropout_prob
        lowerCamelCase : Tuple = drop_path_rate
        lowerCamelCase : Dict = hidden_act
        lowerCamelCase : Tuple = use_absolute_embeddings
        lowerCamelCase : List[str] = patch_norm
        lowerCamelCase : List[str] = layer_norm_eps
        lowerCamelCase : str = initializer_range
        lowerCamelCase : Tuple = is_training
        lowerCamelCase : int = scope
        lowerCamelCase : Union[str, Any] = use_labels
        lowerCamelCase : List[str] = type_sequence_label_size
        lowerCamelCase : str = encoder_stride
        lowerCamelCase : List[str] = out_features
        lowerCamelCase : Optional[int] = out_indices

    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        # Random pixel values (+ optional labels) plus a matching config.
        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        lowerCamelCase : str = None
        if self.use_labels:
            lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )

        lowerCamelCase : str = self.get_config()

        return config, pixel_values, labels

    def a__ ( self: List[Any] )-> Optional[int]:
        # NOTE(review): ``path_norm=self.patch_norm`` looks like a typo for
        # ``patch_norm`` — confirm against FocalNetConfig's signature.
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
        # Base model: verify last_hidden_state shape for the tiny config.
        lowerCamelCase : Tuple = FocalNetModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Tuple = model(__a )

        lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
        # Backbone: check feature-map shapes/channels, with and without
        # explicit ``out_features``.
        lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Optional[Any] = model(__a )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )

        # verify backbone works with out_features=None
        lowerCamelCase : Dict = None
        lowerCamelCase : Dict = FocalNetBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Any = model(__a )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
        # Masked-image-modeling head: reconstruction shape for RGB and greyscale.
        lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[str] = model(__a )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )

        # test greyscale images
        lowerCamelCase : List[str] = 1
        lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
        model.to(__a )
        model.eval()

        lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase : Tuple = model(__a )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
        # Classification head: logits shape for RGB and greyscale inputs.
        lowerCamelCase : Optional[Any] = self.type_sequence_label_size
        lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[str] = model(__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

        # test greyscale images
        lowerCamelCase : int = 1
        lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
        model.to(__a )
        model.eval()

        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase : Optional[Any] = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def a__ ( self: int )-> Optional[int]:
        # Repackage prepare_config_and_inputs() for the common test mixin.
        lowerCamelCase : str = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
        lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Common-API tests for the FocalNet model classes.

    NOTE(review): mechanically renamed code — the base classes ``__lowercase``
    are undefined in this file (presumably ModelTesterMixin and
    PipelineTesterMixin from the imports above), every class attribute shares
    the name ``snake_case__`` (later assignments shadow earlier ones), every
    method is named ``a__``, and bodies reference ``__a``/original local names
    that no longer exist. Restore the original identifiers before use.
    """

    # Model classes exercised; the backbone (last entry) is excluded from the
    # embedding/signature loops below via ``all_model_classes[:-1]``.
    snake_case__ : List[str] =(
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model mapping for the pipeline mixin.
    snake_case__ : Optional[int] =(
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )

    snake_case__ : Tuple =False
    snake_case__ : Dict =False
    snake_case__ : Dict =False
    snake_case__ : Tuple =False
    snake_case__ : Optional[int] =False

    def a__ ( self: Union[str, Any] )-> Optional[int]:
        # Set-up: build the model tester and a ConfigTester for FocalNetConfig.
        lowerCamelCase : List[str] = FocalNetModelTester(self )
        lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )

    def a__ ( self: List[str] )-> List[str]:
        # Run the full battery of config (de)serialization checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def a__ ( self: List[str] )-> Union[str, Any]:
        # Intentionally empty (common-properties hook is a no-op here).
        return

    def a__ ( self: Tuple )-> Tuple:
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def a__ ( self: List[Any] )-> Dict:
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__a )

    def a__ ( self: List[Any] )-> Tuple:
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a )

    def a__ ( self: List[str] )-> Dict:
        lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )

    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def a__ ( self: Optional[Any] )-> str:
        pass

    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def a__ ( self: Optional[Any] )-> Dict:
        pass

    def a__ ( self: Optional[Any] )-> Dict:
        # Input embeddings exist; output embeddings are None or a Linear head.
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : Any = model_class(__a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__a , nn.Linear ) )

    def a__ ( self: Tuple )-> Optional[int]:
        # forward() must take ``pixel_values`` as its first argument.
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : int = model_class(__a )
            lowerCamelCase : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase : Any = [*signature.parameters.keys()]

            lowerCamelCase : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )

    def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
        # Shared helper: run the model and verify both ``hidden_states`` and
        # ``reshaped_hidden_states`` shapes for the given image size.
        # NOTE(review): the duplicate ``__a`` parameter names are a SyntaxError.
        lowerCamelCase : List[Any] = model_class(__a )
        model.to(__a )
        model.eval()

        with torch.no_grad():
            lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )

        lowerCamelCase : List[str] = outputs.hidden_states

        lowerCamelCase : Tuple = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__a ) , __a )

        # FocalNet has a different seq_length
        lowerCamelCase : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

        lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(__a ) , __a )

        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
        lowerCamelCase : Tuple = (
            reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def a__ ( self: Any )-> Any:
        # hidden_states via kwarg and via config, at the native image size.
        lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()

        lowerCamelCase : Union[str, Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : List[str] = True
            self.check_hidden_states_output(__a , __a , __a , __a )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : List[Any] = True

            self.check_hidden_states_output(__a , __a , __a , __a )

    def a__ ( self: str )-> Union[str, Any]:
        # Same checks with a padded (non-divisible) image size.
        lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : List[str] = 3
        lowerCamelCase : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCamelCase : Optional[int] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )

        lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : str = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : Union[str, Any] = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )

    @slow
    def a__ ( self: Optional[int] )-> List[Any]:
        # Smoke-test loading the first published checkpoint.
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    def a__ ( self: str )-> Any:
        # With zero-init config, non-embedding params must init to 0.0 or 1.0.
        lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : int = _config_zero_init(__a )
        for model_class in self.all_model_classes:
            lowerCamelCase : int = model_class(config=__a )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
    """Slow integration test: microsoft/focalnet-tiny on a COCO sample image.

    NOTE(review): mechanically renamed — the test body reads
    ``self.default_image_processor`` but the cached property below is named
    ``a__``, and locals are bound to ``lowerCamelCase`` while later statements
    read the original names. Restore the original identifiers before use.
    """

    @cached_property
    def a__ ( self: Optional[int] )-> Optional[Any]:
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None

    @slow
    def a__ ( self: int )-> Optional[Any]:
        # Load the checkpoint, classify one image, and pin logits + argmax.
        lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
        lowerCamelCase : Any = self.default_image_processor

        lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )

        # forward pass
        with torch.no_grad():
            lowerCamelCase : Any = model(**__a )

        # verify the logits
        lowerCamelCase : Tuple = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , __a )
        lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        # NOTE(review): assertTrue's second argument is a failure *message* —
        # assertEqual(outputs.logits.argmax(...).item(), 281) was likely intended.
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
    """Backbone-API tests for FocalNetBackbone (driven by a backbone test mixin).

    NOTE(review): the base class ``__lowercase`` is undefined in this file
    (presumably ``BackboneTesterMixin`` from the imports above), all class
    attributes share the name ``snake_case__``, and the set-up method
    references ``FocalNetModelTester`` while the tester class above was
    renamed to ``A__``. Restore the original identifiers before use.
    """

    snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
    snake_case__ : Optional[int] =FocalNetConfig

    snake_case__ : str =False

    def a__ ( self: Union[str, Any] )-> Tuple:
        # Set-up hook: build the shared FocalNet model tester.
        lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 0
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class A__ ( unittest.TestCase):
    """Unit tests for the 0/1 knapsack solver ``k.knapsack(capacity, weights, values, count)``.

    Fixes vs. original: locals were bound to a reused throwaway name while the
    calls read an undefined ``__a``, and all methods were named ``a__`` (later
    definitions shadowed earlier ones and none matched unittest's ``test_*``
    discovery pattern).
    """

    def test_base_case(self):
        # Zero capacity always yields zero profit, whatever the items.
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        # Capacity 3 fits the items of weight 2 and 1 for a total value of 5.
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        # Classic example: the 100- and 120-value items fill capacity 50.
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)


if __name__ == "__main__":
    unittest.main()
| 703
|
"""simple docstring"""
import os
def snake_case ( grid=None ) -> int:
    """Project Euler 11: greatest product of four adjacent numbers in a 20x20 grid.

    Adjacent means left-to-right, top-to-bottom, or along either diagonal.

    :param grid: optional 20x20 list of int rows; when omitted the grid is
        read from ``grid.txt`` next to this script (20 ints per line).
    :return: the maximum product of four adjacent entries.

    Fixes vs. original: ``open`` was anchored on an undefined name instead of
    ``__file__``, the rows list and running maximum were bound to a reused
    throwaway name while the loops read the original names, and the return
    annotation referenced an un-imported ``Optional``. The grid is now also
    injectable so the solution is testable without the data file.
    """
    if grid is None:
        with open(os.path.dirname(__file__) + "/grid.txt") as f:
            grid = [[int(x) for x in f.readline().split()] for _ in range(20)]
    l = grid  # noqa: E741
    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum


if __name__ == "__main__":
    # Fix: the original printed ``solution()``, a name that does not exist here.
    print(snake_case())
| 42
| 0
|
"""simple docstring"""
import copy
import re
class TrialShortNamer:
    """Maps hyper-parameter names to short, collision-free abbreviations.

    Subclasses set ``PREFIX`` and ``DEFAULTS`` (param name -> default value);
    :meth:`shortname` then renders a run name such as ``hp_lr0.1`` that omits
    every parameter equal to its default, and :meth:`parse_repr` inverts it.

    NOTE(review): the obfuscated original named every method ``a__`` (so only
    the last one survived class creation) and every local ``lowerCamelCase``;
    this restores the canonical implementation with distinct names.
    """

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        """Configure prefix/defaults on this class and (re)build the naming tables."""
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        """Return (and record in ``info``) the shortest unused prefix of ``word``."""
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback: every prefix is taken, so append "#<digits>"
            # (digits rendered as letters) until an unused name is found.
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    # fixed: the original never advanced ``i``, looping forever
                    # on a collision
                    i += 1
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        """Return a short name for a full ``snake_case`` parameter name."""
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        """Register ``param_name`` and its short form in both lookup tables."""
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        """Build the short-name tables once per class from ``DEFAULTS``."""
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        """Render ``params`` as ``<PREFIX>_<short><value>...``, skipping defaults."""
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            # numeric values are glued to the key, everything else is dash-separated
            sep = "" if isinstance(v, (int, float)) else "-"
            name.append(f"{key}{sep}{v}")
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        """Invert :meth:`shortname`, filling unmentioned params from ``DEFAULTS``."""
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters


# Backward-compatible alias for the previous (mangled) class name.
A__ = TrialShortNamer
| 704
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
    """Fast tests for ``StableDiffusionAttendAndExcitePipeline`` using tiny models on CPU."""
    snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
    snake_case__ : Any =False
    snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
    snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
    snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS

    # setUpClass: toggle deterministic torch kernels so attention results are
    # reproducible across runs.
    @classmethod
    def a__ ( cls: Dict )-> Tuple:
        super().setUpClass()
        torch.use_deterministic_algorithms(__a )

    # tearDownClass: toggles the same flag again on teardown.
    @classmethod
    def a__ ( cls: Union[str, Any] )-> Any:
        super().tearDownClass()
        torch.use_deterministic_algorithms(__a )

    # Build tiny UNet/scheduler/VAE/CLIP components so the pipeline runs
    # quickly on CPU; seeds are fixed for determinism.
    def a__ ( self: Tuple )-> Union[str, Any]:
        torch.manual_seed(0 )
        lowerCamelCase : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
        lowerCamelCase : Union[str, Any] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
        torch.manual_seed(0 )
        lowerCamelCase : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
        lowerCamelCase : Optional[int] = CLIPTextModel(__a )
        lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowerCamelCase : List[str] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    # Deterministic inputs for the dummy pipeline (seeded generator per device).
    def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
        if str(__a ).startswith("""mps""" ):
            lowerCamelCase : Tuple = torch.manual_seed(__a )
        else:
            lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Dict = {
            """prompt""": """a cat and a frog""",
            """token_indices""": [2, 5],
            """generator""": generator,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """max_iter_to_alter""": 2,
            """thresholds""": {0: 0.7},
        }
        return inputs

    # End-to-end CPU inference compared against a hard-coded pixel slice.
    def a__ ( self: Dict )-> str:
        lowerCamelCase : Tuple = """cpu"""
        lowerCamelCase : List[str] = self.get_dummy_components()
        lowerCamelCase : List[Any] = self.pipeline_class(**__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Any = self.get_dummy_inputs(__a )
        lowerCamelCase : Union[str, Any] = pipe(**__a ).images
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        lowerCamelCase : Optional[Any] = np.array(
            [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
        lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__a , 1e-3 )

    # The remaining overrides only tune tolerances/batch sizes for the shared
    # mixin tests.
    def a__ ( self: int )-> Optional[Any]:
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )

    def a__ ( self: Union[str, Any] )-> Optional[int]:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def a__ ( self: Tuple )-> int:
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )

    def a__ ( self: Dict )-> List[Any]:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def a__ ( self: Optional[int] )-> Dict:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )

    def a__ ( self: Any )-> Tuple:
        super().test_save_load_local(expected_max_difference=5e-4 )

    def a__ ( self: str )-> str:
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
    """Slow GPU integration tests for Attend-and-Excite against a stored reference image."""

    # Deterministic kernels for the whole class so GPU output is comparable
    # to the reference numpy image.
    @classmethod
    def a__ ( cls: Any )-> Tuple:
        super().setUpClass()
        torch.use_deterministic_algorithms(__a )

    @classmethod
    def a__ ( cls: Dict )-> Optional[int]:
        super().tearDownClass()
        torch.use_deterministic_algorithms(__a )

    # Free GPU memory between tests.
    def a__ ( self: int )-> Optional[int]:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Run the real SD v1.4 pipeline and compare against a hosted .npy image.
    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : List[Any] = torch.manual_seed(51 )
        lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
        pipe.to("""cuda""" )
        lowerCamelCase : Dict = """a painting of an elephant with glasses"""
        lowerCamelCase : Any = [5, 7]
        lowerCamelCase : Tuple = pipe(
            prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
        lowerCamelCase : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
        # Loose tolerance: 5 inference steps only, so the image is noisy.
        assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 0
|
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Deterministic Miller-Rabin primality test for ``n`` below ~3.32e24.

    Uses the smallest known sets of witness primes that give a deterministic
    answer under each analysed bound.  Above the last bound the test is only
    probabilistic and must be opted into with ``allow_probable=True``.

    NOTE(review): the obfuscated original collapsed ``d, s = n - 1, 0`` into a
    single-name assignment and referenced undefined locals; restored here.

    :param n: integer to test for primality
    :param allow_probable: allow a probable-prime answer above the
        deterministic bound instead of raising
    :return: True if ``n`` is (probably) prime, False if composite
    :raises ValueError: if ``n`` exceeds the deterministic bound and
        ``allow_probable`` is False
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3317044064679887385961981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime.")
    # array bounds provided by analysis (a ``1`` means the previous bound is
    # not improved by adding the next witness prime)
    bounds = [
        2047,
        1373653,
        25326001,
        3215031751,
        2152302898747,
        3474749660383,
        341550071728321,
        1,
        3825123056546413051,
        1,
        1,
        318665857834031151167461,
        3317044064679887385961981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    else:
        # n exceeds every bound (only reachable with allow_probable=True);
        # fall back to the full witness list.
        plist = primes
    # break up n - 1 into d * 2 ** s with d odd
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True


# Backward-compatible alias for the previous (mangled) name.
snake_case = miller_rabin
def test_miller_rabin() -> None:
    """Self-test: a composite/prime pair straddling each deterministic bound.

    NOTE(review): the obfuscated original defined this as ``snake_case`` while
    the ``__main__`` guard called the undefined ``test_miller_rabin``; the
    canonical name is restored so the guard works again.
    """
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838201)
    assert miller_rabin(838207)
    # 1_373_653
    assert not miller_rabin(17316001)
    assert miller_rabin(17316017)
    # 25_326_001
    assert not miller_rabin(3078386641)
    assert miller_rabin(3078386653)
    # 3_215_031_751
    assert not miller_rabin(1713045574801)
    assert miller_rabin(1713045574819)
    # 2_152_302_898_747
    assert not miller_rabin(2779799728307)
    assert miller_rabin(2779799728327)
    # 3_474_749_660_383
    assert not miller_rabin(113850023909441)
    assert miller_rabin(113850023909527)
    # 341_550_071_728_321
    assert not miller_rabin(1275041018848804351)
    assert miller_rabin(1275041018848804391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79666464458507787791867)
    assert miller_rabin(79666464458507787791951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552840677446647897660333)
    assert miller_rabin(552840677446647897660359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
| 705
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
    """Builds tiny ESM configs plus random inputs and checks model output shapes."""

    def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
        # Store every hyper-parameter of the tiny test model on the instance.
        lowerCamelCase : Union[str, Any] = parent
        lowerCamelCase : Tuple = batch_size
        lowerCamelCase : Any = seq_length
        lowerCamelCase : Any = is_training
        lowerCamelCase : Tuple = use_input_mask
        lowerCamelCase : int = use_token_type_ids
        lowerCamelCase : List[str] = use_labels
        lowerCamelCase : Optional[int] = vocab_size
        lowerCamelCase : Tuple = hidden_size
        lowerCamelCase : List[str] = num_hidden_layers
        lowerCamelCase : Optional[int] = num_attention_heads
        lowerCamelCase : Optional[Any] = intermediate_size
        lowerCamelCase : Optional[Any] = hidden_act
        lowerCamelCase : Union[str, Any] = hidden_dropout_prob
        lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
        lowerCamelCase : Any = max_position_embeddings
        lowerCamelCase : str = type_vocab_size
        lowerCamelCase : List[Any] = type_sequence_label_size
        lowerCamelCase : Optional[Any] = initializer_range
        lowerCamelCase : Union[str, Any] = num_labels
        lowerCamelCase : Optional[Any] = num_choices
        lowerCamelCase : Any = scope

    # Random ids/mask/labels plus a config for one forward pass.
    def a__ ( self: Optional[int] )-> List[Any]:
        lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : Dict = None
        if self.use_input_mask:
            lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase : Any = None
        lowerCamelCase : int = None
        lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase : List[str] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    # Tiny EsmConfig built from the stored hyper-parameters.
    def a__ ( self: Tuple )-> Union[str, Any]:
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    # Base model: check last_hidden_state and pooler_output shapes.
    def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
        lowerCamelCase : Optional[int] = EsmModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a , attention_mask=__a )
        lowerCamelCase : str = model(__a )
        lowerCamelCase : Optional[Any] = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    # Masked-LM head: logits over the vocabulary.
    def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
        lowerCamelCase : str = EsmForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Token-classification head: logits over num_labels per token.
    def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
        lowerCamelCase : Tuple = self.num_labels
        lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Repackage prepare_config_and_inputs() into the dict format the common
    # mixin tests expect.
    def a__ ( self: Optional[int] )-> Optional[int]:
        lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Tuple = config_and_inputs
        lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Common model/pipeline tests for the ESM family (masked LM, sequence/token classification)."""

    snake_case__ : Any =False
    snake_case__ : Dict =(
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Dict =()
    snake_case__ : Optional[int] =(
        {
            '''feature-extraction''': EsmModel,
            '''fill-mask''': EsmForMaskedLM,
            '''text-classification''': EsmForSequenceClassification,
            '''token-classification''': EsmForTokenClassification,
            '''zero-shot''': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : Any =True

    # Wire up the tiny-model tester and config tester.
    def a__ ( self: Optional[int] )-> Optional[int]:
        lowerCamelCase : Optional[Any] = EsmModelTester(self )
        lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )

    def a__ ( self: List[Any] )-> Optional[Any]:
        self.config_tester.run_common_tests()

    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    # Re-run the base-model check under each position-embedding variant.
    def a__ ( self: Tuple )-> Any:
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            lowerCamelCase : Tuple = type
            self.model_tester.create_and_check_model(*__a )

    def a__ ( self: List[str] )-> Tuple:
        lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )

    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )

    # Smoke-test loading one pretrained checkpoint from the hub.
    @slow
    def a__ ( self: Any )-> List[Any]:
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : int = EsmModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    # Position ids derived from input ids must skip the padding index.
    def a__ ( self: str )-> List[str]:
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
        lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        lowerCamelCase : Union[str, Any] = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    # Position ids derived from inputs_embeds are sequential past padding_idx.
    def a__ ( self: Optional[int] )-> int:
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase : Any = EsmEmbeddings(config=__a )
        lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
        lowerCamelCase : List[Any] = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
        lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    @unittest.skip("""Esm does not support embedding resizing""" )
    def a__ ( self: Any )-> Optional[Any]:
        pass

    @unittest.skip("""Esm does not support embedding resizing""" )
    def a__ ( self: Dict )-> Dict:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def a__ ( self: List[str] )-> Dict:
        pass
@require_torch
class A__ ( __lowercase):
    """Slow integration tests comparing real ESM-2 checkpoints to hard-coded reference values."""

    # Masked-LM checkpoint: check logits shape and a 3x3 slice of values.
    @slow
    def a__ ( self: Any )-> Union[str, Any]:
        with torch.no_grad():
            lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            lowerCamelCase : Tuple = model(__a )[0]
            lowerCamelCase : Dict = 33
            lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , __a )
            lowerCamelCase : Tuple = torch.tensor(
                [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )

    # Base model: compare a slice of the last hidden state.
    @slow
    def a__ ( self: Dict )-> str:
        with torch.no_grad():
            lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            lowerCamelCase : Any = model(__a )[0]
            # compare the actual values for a slice.
            lowerCamelCase : Tuple = torch.tensor(
                [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
# Elementary charge in coulombs.  NOTE(review): the function body references
# ELECTRON_CHARGE, but the constant had been mangled to ``__lowerCamelCase``
# and was therefore undefined; the name is restored here.
ELECTRON_CHARGE = 1.6_021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    """Solve sigma = q * n * mu for whichever of the three quantities is zero.

    Exactly one argument must be 0; that unknown is computed from the other
    two and returned as ``(name, value)``.

    >>> electric_conductivity(25, 100, 0)
    ('mobility', 1.5604519068722301e+18)
    >>> electric_conductivity(0, 1600, 200)
    ('conductivity', 5.12672e-14)
    >>> electric_conductivity(1000, 0, 1200)
    ('electron_conc', 5.201506356240767e+18)
    """
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )


# Backward-compatible alias for the previous (mangled) name.
snake_case = electric_conductivity

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 706
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
    """Tokenizer test-suite for ALBERT's SentencePiece tokenizer (slow and fast variants)."""

    snake_case__ : List[str] =AlbertTokenizer
    snake_case__ : Optional[Any] =AlbertTokenizerFast
    snake_case__ : Optional[int] =True
    snake_case__ : Any =True
    snake_case__ : Optional[int] =True

    # Build a tokenizer from the SentencePiece fixture and save it so
    # get_tokenizer() can reload it from tmpdirname.
    def a__ ( self: Dict )-> Optional[Any]:
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase : int = AlbertTokenizer(__a )
        tokenizer.save_pretrained(self.tmpdirname )

    def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
        lowerCamelCase : List[str] = """this is a test"""
        lowerCamelCase : int = """this is a test"""
        return input_text, output_text

    # <pad> must round-trip to id 0 and back.
    def a__ ( self: Any )-> List[Any]:
        lowerCamelCase : int = """<pad>"""
        lowerCamelCase : Optional[int] = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )

    # First/last vocab entries and total vocab size.
    def a__ ( self: Tuple )-> str:
        lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
        self.assertEqual(len(__a ) , 30_000 )

    def a__ ( self: List[str] )-> Any:
        self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )

    # Slow and fast tokenizers must agree on tokens and ids.
    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase : str = self.get_tokenizer()
        lowerCamelCase : Tuple = self.get_rust_tokenizer()
        lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
        lowerCamelCase : List[str] = tokenizer.tokenize(__a )
        lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
        self.assertListEqual(__a , __a )
        lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
        lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
        self.assertListEqual(__a , __a )
        lowerCamelCase : Any = self.get_rust_tokenizer()
        lowerCamelCase : List[str] = tokenizer.encode(__a )
        lowerCamelCase : str = rust_tokenizer.encode(__a )
        self.assertListEqual(__a , __a )

    # Full tokenization round-trip including accent handling and <unk>.
    def a__ ( self: Tuple )-> List[Any]:
        lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
        lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
        lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
        lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )

    # cls/sep placement for single sentences and sentence pairs.
    def a__ ( self: Tuple )-> str:
        lowerCamelCase : str = AlbertTokenizer(__a )
        lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
        lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
        lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
        lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]

    # Full integration check against the published albert-base-v2 encoding.
    @slow
    def a__ ( self: Any )-> Dict:
        # fmt: off
        lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42
| 0
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :Any = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__lowerCamelCase :str = 50_003
__lowerCamelCase :int = 50_002
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =PLBartTokenizer
snake_case__ : Any =None
snake_case__ : Dict =False
    def a__ ( self: int )-> Tuple:
        # Build a PLBart tokenizer from the SentencePiece test fixture and
        # save it so get_tokenizer() can reload it from tmpdirname.
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCamelCase : List[Any] = PLBartTokenizer(__a , language_codes="""base""" , keep_accents=__a )
        tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: int )-> str:
lowerCamelCase : Optional[Any] = PLBartTokenizer(__a , language_codes="""base""" , keep_accents=__a )
lowerCamelCase : List[str] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCamelCase : List[Any] = tokenizer.vocab_size
lowerCamelCase : str = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )]
self.assertListEqual(__a , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] )
lowerCamelCase : Optional[Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCamelCase : Optional[int] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
def a__ ( self: Optional[int] )-> Optional[int]:
lowerCamelCase : Optional[Any] = PLBartTokenizer(__a , language_codes="""multi""" , keep_accents=__a )
lowerCamelCase : Optional[int] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(__a , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase : str = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
lowerCamelCase : Tuple = tokenizer.vocab_size
lowerCamelCase : int = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )]
self.assertListEqual(
__a , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] )
lowerCamelCase : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
lowerCamelCase : List[Any] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
    """Integration tests against the ``uclanlp/plbart-python-en_XX`` checkpoint.

    NOTE(review): the class attributes below all bind the same name
    ``snake_case__``; upstream they were presumably ``checkpoint_name``,
    ``src_text``, ``tgt_text`` and ``expected_src_tokens``. Likewise the methods
    are all named ``a__`` and shadow each other — confirm upstream names.
    """

    snake_case__ : Optional[int] ='''uclanlp/plbart-python-en_XX'''
    # Source python snippets (NEW_LINE_INDENT is the PLBart newline marker).
    snake_case__ : str =[
        '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
        '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
    ]
    # English target descriptions.
    snake_case__ : Dict =[
        '''Returns the maximum value of a b c.''',
        '''Sums the values of a b c.''',
    ]
    # Expected ids for src_text[0], ending with EOS (2) and the PYTHON_CODE language id.
    snake_case__ : Tuple =[
        1_34,
        54_52,
        3_34_60,
        3_34_41,
        3_34_63,
        3_34_65,
        3_34_63,
        3_34_49,
        9_88,
        20,
        3_34_56,
        19,
        3_34_56,
        7_71,
        39,
        42_58,
        8_89,
        33_18,
        3_34_41,
        3_34_63,
        3_34_65,
        3_34_63,
        3_34_49,
        24_71,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def a__ ( cls: Any )-> Optional[Any]:
        """Load the checkpoint tokenizer once for the whole class (python -> en_XX)."""
        lowerCamelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" )
        # NOTE(review): bound to ``lowerCamelCase`` here but read as ``cls.tokenizer``
        # by the tests; the second assignment was presumably ``cls.pad_token_id = 1``.
        lowerCamelCase : int = 1
        return cls

    def a__ ( self: Tuple )-> str:
        """Language-code tokens map to their fixed fairseq ids."""
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50_001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50_002 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50_003 )

    def a__ ( self: List[str] )-> Tuple:
        """Encoding the first source snippet yields the expected id sequence."""
        lowerCamelCase : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , __a )

    def a__ ( self: Optional[int] )-> Tuple:
        """Decoding strips the language code when skip_special_tokens is set."""
        self.assertIn(__a , self.tokenizer.all_special_ids )
        lowerCamelCase : List[str] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
        lowerCamelCase : int = self.tokenizer.decode(__a , skip_special_tokens=__a )
        lowerCamelCase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
        self.assertEqual(__a , __a )
        self.assertNotIn(self.tokenizer.eos_token , __a )

    def a__ ( self: Union[str, Any] )-> Tuple:
        """Truncation keeps EOS + language code as the final two ids."""
        lowerCamelCase : List[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 20]
        self.assertIsInstance(src_text[0] , __a )
        lowerCamelCase : Dict = 10
        lowerCamelCase : str = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , __a )
        self.assertEqual(len(__a ) , __a )

    def a__ ( self: Optional[Any] )-> int:
        """<mask> and __java__ resolve to their reserved ids."""
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50_004, 50_001] )

    def a__ ( self: int )-> Dict:
        """Save/reload round-trip preserves the fairseq special-token table."""
        lowerCamelCase : Optional[Any] = tempfile.mkdtemp()
        lowerCamelCase : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(__a )
        lowerCamelCase : str = PLBartTokenizer.from_pretrained(__a )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )

    @require_torch
    def a__ ( self: Optional[Any] )-> Optional[Any]:
        """Batched encoding places language codes / EOS as fairseq expects."""
        lowerCamelCase : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="""pt""" )
        lowerCamelCase : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
        self.assertEqual(batch.decoder_input_ids[1][0] , __a )
        self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
        self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )

    @require_torch
    def a__ ( self: Optional[int] )-> List[str]:
        """Padding to the expected length keeps the full special-token layout."""
        lowerCamelCase : Tuple = self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
        lowerCamelCase : Tuple = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
        self.assertIsInstance(__a , __a )
        self.assertEqual((2, 26) , batch.input_ids.shape )
        self.assertEqual((2, 26) , batch.attention_mask.shape )
        lowerCamelCase : List[Any] = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , __a )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )

    def a__ ( self: Optional[int] )-> List[str]:
        """Source and target sides can be truncated/padded independently."""
        lowerCamelCase : Dict = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="""pt""" )
        lowerCamelCase : List[str] = self.tokenizer(
            text_target=self.tgt_text , padding=__a , truncation=__a , max_length=10 , return_tensors="""pt""" )
        lowerCamelCase : Optional[int] = targets["""input_ids"""]
        lowerCamelCase : int = shift_tokens_right(__a , self.tokenizer.pad_token_id )
        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def a__ ( self: List[Any] )-> Any:
        """Translation inputs carry the target language as forced_bos_token_id."""
        lowerCamelCase : List[str] = self.tokenizer._build_translation_inputs(
            """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" )
        self.assertEqual(
            nested_simplify(__a ) , {
                # A, test, EOS, en_XX
                """input_ids""": [[150, 242, 2, 50_003]],
                """attention_mask""": [[1, 1, 1, 1]],
                # java
                """forced_bos_token_id""": 50_001,
            } , )
| 707
|
"""simple docstring"""
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def snake_case ( graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    """Return the vertices reachable from ``vert`` in DFS post-order.

    This is the first (topological-ordering) pass of Kosaraju's algorithm.
    ``visited`` is mutated in place so successive calls share traversal state.

    Fixes vs. the previous version: the signature repeated one parameter name
    three times (a SyntaxError), the visited flag was assigned to a throwaway
    local instead of ``visited[vert]``, and recursion went through the
    undefined name ``topology_sort``.
    """
    order: list[int] = []

    def _dfs(v: int) -> None:
        # Post-order DFS: a vertex is emitted only after all of its descendants,
        # implemented as an inner helper so recursion cannot be broken by
        # module-level name shadowing.
        visited[v] = True
        for neighbour in graph[v]:
            if not visited[neighbour]:
                _dfs(neighbour)
        order.append(v)

    _dfs(vert)
    return order
def snake_case ( reversed_graph: dict[int, list[int]] , vert: int , visited: list[bool] ) -> list[int]:
    """Collect one strongly connected component by DFS on the reversed graph.

    This is the second pass of Kosaraju's algorithm: starting from ``vert`` in
    ``reversed_graph``, every reachable unvisited vertex belongs to the same
    component. ``visited`` is mutated in place.

    Fixes vs. the previous version: the signature repeated one parameter name
    three times (a SyntaxError), the visited flag was assigned to a throwaway
    local, and recursion went through the undefined name ``find_components``.
    """
    component: list[int] = []

    def _collect(v: int) -> None:
        # Pre-order: the DFS root is appended before its descendants, matching
        # the original ``[vert] + recursive results`` ordering.
        visited[v] = True
        component.append(v)
        for neighbour in reversed_graph[v]:
            if not visited[neighbour]:
                _collect(neighbour)

    _collect(vert)
    return component
def snake_case ( graph: dict[int, list[int]] ) -> list[list[int]]:
    """Return the strongly connected components of ``graph`` (Kosaraju's algorithm).

    The graph is an adjacency list keyed by the vertices ``0 .. len(graph)-1``.
    Two passes: a post-order DFS on ``graph`` to get a topological-like order,
    then DFS on the reversed graph, in reverse order, to peel off components.

    Fixes vs. the previous version: it called the undefined names
    ``topology_sort`` and ``find_components`` (the helpers had been renamed),
    so it could never run. The helpers are now private inner functions, which
    also makes this function self-contained.
    """
    n = len(graph)

    def _topology_sort(vert: int, visited: list[bool], order: list[int]) -> None:
        # First pass: post-order DFS on the original graph.
        visited[vert] = True
        for neighbour in graph[vert]:
            if not visited[neighbour]:
                _topology_sort(neighbour, visited, order)
        order.append(vert)

    def _find_component(vert: int, visited: list[bool], reversed_graph: dict[int, list[int]]) -> list[int]:
        # Second pass: DFS on the reversed graph collects one component.
        visited[vert] = True
        component = [vert]
        for neighbour in reversed_graph[vert]:
            if not visited[neighbour]:
                component += _find_component(neighbour, visited, reversed_graph)
        return component

    # Build the reversed graph (every edge u -> v becomes v -> u).
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(n)}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    visited = n * [False]
    order: list[int] = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            _topology_sort(i, visited, order)

    # Peel components off in reverse finishing order.
    components_list: list[list[int]] = []
    visited = n * [False]
    for i in range(n):
        vert = order[n - i - 1]
        if not visited[vert]:
            components_list.append(_find_component(vert, visited, reversed_graph))
    return components_list
| 42
| 0
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__lowerCamelCase :Tuple = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def lowerCAmelCase ( UpperCamelCase__ : Dict , UpperCamelCase__ : tuple , UpperCamelCase__ : Path , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=False , ) -> Union[str, Any]:
    """Export a model to ONNX, branching on the torch version's export signature.

    NOTE(review): the signature repeats the parameter name ``UpperCamelCase__``
    (not valid Python — duplicate argument names), and the body reads
    ``output_path`` / ``is_torch_less_than_1_11`` that are not defined under
    these names; upstream this was ``onnx_export(model, model_args, output_path,
    ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format)``
    — restore before running.
    """
    # Ensure the destination directory exists before export writes to it.
    output_path.parent.mkdir(parents=UpperCamelCase__ , exist_ok=UpperCamelCase__ )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , use_external_data_format=UpperCamelCase__ , enable_onnx_checker=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
    else:
        export(
            UpperCamelCase__ , UpperCamelCase__ , f=output_path.as_posix() , input_names=UpperCamelCase__ , output_names=UpperCamelCase__ , dynamic_axes=UpperCamelCase__ , do_constant_folding=UpperCamelCase__ , opset_version=UpperCamelCase__ , )
@torch.no_grad()
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : bool = False ) -> Dict:
    """Convert a diffusers VAE decoder checkpoint to ONNX (optionally fp16 on CUDA).

    NOTE(review): the signature repeats ``UpperCamelCase__`` (duplicate argument
    names — SyntaxError) and the body reads ``fpaa`` / ``model_path`` /
    ``output_path`` / ``onnx_export`` that are not bound under these names;
    upstream this was ``convert_models(model_path, output_path, opset, fp16)``.
    """
    # Pick the export dtype; fp16 export requires CUDA.
    lowerCamelCase : Tuple = torch.floataa if fpaa else torch.floataa
    if fpaa and torch.cuda.is_available():
        lowerCamelCase : str = """cuda"""
    elif fpaa and not torch.cuda.is_available():
        raise ValueError("""`float16` model export is only supported on GPUs with CUDA""" )
    else:
        lowerCamelCase : Dict = """cpu"""
    lowerCamelCase : Optional[Any] = Path(UpperCamelCase__ )
    # VAE DECODER
    lowerCamelCase : List[Any] = AutoencoderKL.from_pretrained(model_path + """/vae""" )
    lowerCamelCase : Optional[int] = vae_decoder.config.latent_channels
    # forward only through the decoder part
    lowerCamelCase : Dict = vae_decoder.decode
    # Trace the decoder with a dummy latent of shape (1, C, 25, 25) and export;
    # batch/height/width are marked dynamic.
    onnx_export(
        UpperCamelCase__ , model_args=(
            torch.randn(1 , UpperCamelCase__ , 25 , 25 ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ),
            False,
        ) , output_path=output_path / """vae_decoder""" / """model.onnx""" , ordered_input_names=["""latent_sample""", """return_dict"""] , output_names=["""sample"""] , dynamic_axes={
            """latent_sample""": {0: """batch""", 1: """channels""", 2: """height""", 3: """width"""},
        } , opset=UpperCamelCase__ , )
    # Free the model once exported.
    del vae_decoder
if __name__ == "__main__":
__lowerCamelCase :List[str] = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__lowerCamelCase :int = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('SD: Done: ONNX')
| 708
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
__lowerCamelCase :str = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config URL.
__lowerCamelCase :Any = {
    'huggingface/time-series-transformer-tourism-monthly': (
        'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
    """Configuration class for the Time Series Transformer model.

    NOTE(review): the ``__init__`` signature repeats the parameter name ``__a``
    many times, which is not valid Python (duplicate argument names). The
    intended keyword names can be read off the attribute assignments in the
    body; restore them from the upstream ``TimeSeriesTransformerConfig`` before
    running this file.
    """

    snake_case__ : List[Any] ='''time_series_transformer'''
    # Maps generic config attribute names onto this model's parameter names.
    snake_case__ : List[Any] ={
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }

    def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
        """Populate time-series settings, then the Transformer architecture settings.

        Attribute initialization order matters: ``cardinality`` must be set
        before the ``embedding_dimension`` default is derived from it, and
        ``d_model`` depends on ``_number_of_features`` (a property reading
        several of the attributes assigned above it).
        """
        # time series specific configuration
        lowerCamelCase : str = prediction_length
        # Default the encoder context window to the prediction horizon.
        lowerCamelCase : Optional[Any] = context_length or prediction_length
        lowerCamelCase : Tuple = distribution_output
        lowerCamelCase : Any = loss
        lowerCamelCase : List[Any] = input_size
        lowerCamelCase : int = num_time_features
        lowerCamelCase : Dict = lags_sequence
        lowerCamelCase : Optional[int] = scaling
        lowerCamelCase : int = num_dynamic_real_features
        lowerCamelCase : Tuple = num_static_real_features
        lowerCamelCase : Any = num_static_categorical_features
        # Validate the per-feature cardinalities of categorical features.
        if cardinality and num_static_categorical_features > 0:
            if len(__a ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            lowerCamelCase : int = cardinality
        else:
            lowerCamelCase : Dict = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(__a ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            lowerCamelCase : str = embedding_dimension
        else:
            # Heuristic default: half the cardinality, capped at 50.
            lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowerCamelCase : Any = num_parallel_samples
        # Transformer architecture configuration
        # Model width = value dims per lag + static/dynamic feature dims.
        lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
        lowerCamelCase : List[str] = d_model
        lowerCamelCase : Tuple = encoder_attention_heads
        lowerCamelCase : Optional[int] = decoder_attention_heads
        lowerCamelCase : Union[str, Any] = encoder_ffn_dim
        lowerCamelCase : str = decoder_ffn_dim
        lowerCamelCase : str = encoder_layers
        lowerCamelCase : Any = decoder_layers
        lowerCamelCase : Optional[int] = dropout
        lowerCamelCase : List[str] = attention_dropout
        lowerCamelCase : Tuple = activation_dropout
        lowerCamelCase : Optional[int] = encoder_layerdrop
        lowerCamelCase : int = decoder_layerdrop
        lowerCamelCase : Optional[int] = activation_function
        lowerCamelCase : Optional[Any] = init_std
        lowerCamelCase : Optional[Any] = use_cache
        super().__init__(is_encoder_decoder=__a , **__a )

    @property
    def a__ ( self: int )-> int:
        """Number of extra feature dimensions appended to each model input step."""
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 42
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowerCamelCase :int = logging.get_logger(__name__)
class A__ ( __lowercase):
    """ConvNeXT-style image processor: resize (with crop_pct), rescale, normalize.

    NOTE(review): the method signatures below repeat the parameter name ``__a``
    (not valid Python — duplicate argument names); the intended names can be
    read off the keywords used at the call sites. Restore them from the
    upstream processor before running this file.
    """

    snake_case__ : int =['''pixel_values''']

    def __init__( self: str , __a: bool = True , __a: Dict[str, int] = None , __a: float = None , __a: PILImageResampling = PILImageResampling.BILINEAR , __a: bool = True , __a: Union[int, float] = 1 / 255 , __a: bool = True , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , **__a: Optional[int] , )-> None:
        """Store preprocessing defaults (size, crop_pct, rescale/normalize settings)."""
        super().__init__(**__a )
        lowerCamelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 384}
        lowerCamelCase : List[str] = get_size_dict(__a , default_to_square=__a )
        lowerCamelCase : Tuple = do_resize
        lowerCamelCase : Dict = size
        # Default value set here for backwards compatibility where the value in config is None
        lowerCamelCase : List[Any] = crop_pct if crop_pct is not None else 224 / 256
        lowerCamelCase : Dict = resample
        lowerCamelCase : Any = do_rescale
        lowerCamelCase : str = rescale_factor
        lowerCamelCase : Optional[Any] = do_normalize
        lowerCamelCase : int = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowerCamelCase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def a__ ( self: Dict , __a: np.ndarray , __a: Dict[str, int] , __a: float , __a: PILImageResampling = PILImageResampling.BICUBIC , __a: Optional[Union[str, ChannelDimension]] = None , **__a: Optional[Any] , )-> np.ndarray:
        """Resize an image; below 384px the shortest edge is scaled by 1/crop_pct then center-cropped."""
        lowerCamelCase : List[Any] = get_size_dict(__a , default_to_square=__a )
        if "shortest_edge" not in size:
            raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        lowerCamelCase : Optional[Any] = size["""shortest_edge"""]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            lowerCamelCase : Union[str, Any] = int(shortest_edge / crop_pct )
            lowerCamelCase : str = get_resize_output_image_size(__a , size=__a , default_to_square=__a )
            lowerCamelCase : Optional[int] = resize(image=__a , size=__a , resample=__a , data_format=__a , **__a )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=__a , size=(shortest_edge, shortest_edge) , data_format=__a , **__a )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                __a , size=(shortest_edge, shortest_edge) , resample=__a , data_format=__a , **__a )

    def a__ ( self: List[Any] , __a: np.ndarray , __a: Union[int, float] , __a: Optional[Union[str, ChannelDimension]] = None , **__a: Tuple , )-> Union[str, Any]:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(__a , scale=__a , data_format=__a , **__a )

    def a__ ( self: Dict , __a: np.ndarray , __a: Union[float, List[float]] , __a: Union[float, List[float]] , __a: Optional[Union[str, ChannelDimension]] = None , **__a: List[Any] , )-> np.ndarray:
        """Normalize an image with the given per-channel mean and std."""
        return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )

    def a__ ( self: Dict , __a: ImageInput , __a: bool = None , __a: Dict[str, int] = None , __a: float = None , __a: PILImageResampling = None , __a: bool = None , __a: float = None , __a: bool = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[float, List[float]]] = None , __a: Optional[Union[str, TensorType]] = None , __a: ChannelDimension = ChannelDimension.FIRST , **__a: str , )-> PIL.Image.Image:
        """Run the full preprocessing pipeline, falling back to instance defaults."""
        lowerCamelCase : Dict = do_resize if do_resize is not None else self.do_resize
        lowerCamelCase : List[str] = crop_pct if crop_pct is not None else self.crop_pct
        lowerCamelCase : Optional[int] = resample if resample is not None else self.resample
        lowerCamelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowerCamelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowerCamelCase : int = image_mean if image_mean is not None else self.image_mean
        lowerCamelCase : Tuple = image_std if image_std is not None else self.image_std
        lowerCamelCase : Tuple = size if size is not None else self.size
        lowerCamelCase : Any = get_size_dict(__a , default_to_square=__a )
        lowerCamelCase : Any = make_list_of_images(__a )
        if not valid_images(__a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # NOTE(review): evaluates as (do_resize and size is None) or (resample is None)
        # due to precedence — likely missing parentheses around the `or`; this
        # mirrors the upstream processor, confirm intent before changing.
        if do_resize and size is None or resample is None:
            raise ValueError("""Size and resample must be specified if do_resize is True.""" )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("""crop_pct must be specified if size < 384.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # All transformations expect numpy arrays.
        lowerCamelCase : int = [to_numpy_array(__a ) for image in images]
        if do_resize:
            lowerCamelCase : Tuple = [self.resize(image=__a , size=__a , crop_pct=__a , resample=__a ) for image in images]
        if do_rescale:
            lowerCamelCase : Tuple = [self.rescale(image=__a , scale=__a ) for image in images]
        if do_normalize:
            lowerCamelCase : List[str] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
        # Convert to the requested channel layout and wrap in a BatchFeature.
        lowerCamelCase : Dict = [to_channel_dimension_format(__a , __a ) for image in images]
        lowerCamelCase : int = {"""pixel_values""": images}
        return BatchFeature(data=__a , tensor_type=__a )
| 709
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case ( UpperCamelCase__ : list[int] , radix: int = 10 ) -> list[int]:
    """Sort non-negative integers in place with LSD radix sort and return the list.

    ``radix`` is the digit base (default 10, matching the module-level constant).
    Only correct for non-negative integers; an empty list is returned unchanged.

    Fixes vs. the previous version: it iterated the undefined name
    ``list_of_ints``, sized the buckets with ``range(<list>)`` (a TypeError),
    and wrote each sorted value into a throwaway local instead of back into
    the input list. The digit base is also parameterized now instead of being
    hard-coded via a module global.
    """
    if not UpperCamelCase__:
        # Nothing to sort; also avoids max() on an empty sequence.
        return UpperCamelCase__
    placement = 1
    max_digit = max(UpperCamelCase__ )
    while placement <= max_digit:
        # declare and initialize empty buckets, one per possible digit
        buckets: list[list[int]] = [[] for _ in range(radix )]
        # distribute the values into buckets by the current digit
        for value in UpperCamelCase__:
            tmp = int((value / placement) % radix )
            buckets[tmp].append(value )
        # write the buckets' contents back into the input list, in order
        a = 0
        for b in range(radix ):
            for value in buckets[b]:
                UpperCamelCase__[a] = value
                a += 1
        # move to the next digit
        placement *= radix
    return UpperCamelCase__
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42
| 0
|
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> np.ndarray:
    """Warp an image with the affine transform mapping one point triple onto another.

    NOTE(review): the signature repeats ``UpperCamelCase__`` five times (not
    valid Python — duplicate argument names), and the body reads ``rows`` /
    ``cols`` that are not bound; upstream this was presumably
    ``get_rotation(img, pt1, pt2, rows, cols)`` — restore before running.
    """
    lowerCamelCase : Optional[int] = cva.getAffineTransform(UpperCamelCase__ , UpperCamelCase__ )
    return cva.warpAffine(UpperCamelCase__ , UpperCamelCase__ , (rows, cols) )
if __name__ == "__main__":
# read original image
__lowerCamelCase :List[Any] = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
__lowerCamelCase :Dict = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
__lowerCamelCase :List[Any] = gray_img.shape
# set different points to rotate image
__lowerCamelCase :List[Any] = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
__lowerCamelCase :List[Any] = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
__lowerCamelCase :Tuple = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
__lowerCamelCase :Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
__lowerCamelCase :List[str] = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
__lowerCamelCase :Optional[int] = plt.figure(1)
__lowerCamelCase :Optional[Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 710
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple:
    """Copy a weight (and optional bias) array into a torch layer's parameters.

    NOTE(review): the signature repeats ``UpperCamelCase__`` (duplicate argument
    names — SyntaxError) and the body reads ``torch_layer`` / ``weight`` /
    ``bias`` that are not bound; upstream this was
    ``set_param(torch_layer, weight, bias=None)``.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
    lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
        lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]:
    """Load trax LSH self-attention weights (query_key, value, output) into a torch layer.

    NOTE(review): duplicate ``UpperCamelCase__`` parameter names (SyntaxError);
    upstream this was ``set_layer_weights_in_torch_lsh(weights, torch_layer,
    hidden_size)`` — the body reads ``weights`` and ``torch_layer`` directly.
    """
    # set torch weights for 1-to-1 comparison
    lowerCamelCase : Dict = np.asarray(weights[0] )
    lowerCamelCase : List[Any] = np.asarray(weights[1] )
    lowerCamelCase : List[str] = np.asarray(weights[2] )
    # Trax stores per-head matrices; transpose/flatten them to torch's 2-D layout.
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]:
    """Load trax local self-attention weights (query, key, value, output) into a torch layer.

    NOTE(review): duplicate ``UpperCamelCase__`` parameter names (SyntaxError);
    upstream this was ``set_layer_weights_in_torch_local(weights, torch_layer,
    hidden_size)``.
    """
    # set torch weights for 1-to-1 comparison
    lowerCamelCase : Tuple = np.asarray(weights[0] )
    lowerCamelCase : Any = np.asarray(weights[1] )
    lowerCamelCase : List[Any] = np.asarray(weights[2] )
    lowerCamelCase : List[str] = np.asarray(weights[3] )
    # Per-head matrices are transposed/flattened into torch's 2-D layout.
    set_param(
        torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , )
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]:
    """Load one trax Reformer block (attention + feed-forward) into a torch block.

    NOTE(review): duplicate ``UpperCamelCase__`` parameter names (SyntaxError)
    and the body reads ``weights`` / ``torch_block`` / ``hidden_size`` /
    ``attn_weights`` / ``intermediate_weights`` that are not bound under these
    names; upstream this was ``set_block_weights_in_torch(weights, torch_block,
    hidden_size)``.
    """
    # layernorm 1
    lowerCamelCase : str = weights[0][0][0]
    lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] )
    lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
    # lsh weights + output
    # LSH attention ships 3 weight arrays, local attention ships 4.
    lowerCamelCase : List[Any] = weights[0][1]
    if len(UpperCamelCase__ ) < 4:
        set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
    else:
        set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ )
    # intermediate weighs
    lowerCamelCase : int = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(UpperCamelCase__ ) == 4:
        lowerCamelCase : Dict = intermediate_weights[2]
    # layernorm 2
    lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] )
    lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , )
    # intermediate dense
    lowerCamelCase : Optional[Any] = np.asarray(intermediate_weights[1][0] )
    lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
    # intermediate out
    lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] )
    lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a full trax Reformer checkpoint into a PyTorch ReformerModelWithLMHead.

    Args:
        weights: top-level trax weight tuple for the whole model.
        torch_model: target ``ReformerModelWithLMHead`` instance.
        hidden_size: model hidden size from the config.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    # axial position embeddings are stored as a tuple of factorized weights
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), F'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    # each torch layer consumes 4 consecutive trax weight entries
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint into a PyTorch state dict.

    Args:
        trax_model_pkl_path: path to the pickled trax checkpoint.
        config_file: JSON config describing the Reformer architecture.
        pytorch_dump_path: output path for the converted ``state_dict``.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(F'Building PyTorch model from configuration: {config}')
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase :int = logging.get_logger(__name__)

# Map of pretrained DeiT checkpoint names to their hosted config URLs.
# NOTE(review): the variable names were mangled; these are presumably the
# module logger and DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP — confirm upstream.
__lowerCamelCase :str = {
    'facebook/deit-base-distilled-patch16-224': (
        'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class A__ ( __lowercase):
    """Configuration for the DeiT vision transformer.

    Stores the architecture hyper-parameters (embedding/transformer sizes,
    image/patch geometry) verbatim on the instance; extra keyword arguments
    are forwarded to the base configuration class.
    """

    snake_case__ : Any ='''deit'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class A__ ( __lowercase):
    """ONNX export configuration for DeiT.

    NOTE(review): both properties below carry the same mangled name ``a__``,
    so the second definition shadows the first and the inputs mapping is
    unreachable. They presumably correspond to ``inputs`` and
    ``atol_for_validation`` in the original export config — confirm upstream.
    """

    # Minimum ONNX opset/torch version supported by the exporter.
    snake_case__ : Union[str, Any] =version.parse('''1.11''')

    @property
    def a__ ( self: Dict )-> Mapping[str, Mapping[int, str]]:
        # Input spec: a single pixel_values tensor with all four axes dynamic.
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def a__ ( self: int )-> float:
        # Absolute tolerance used when validating exported model outputs.
        return 1e-4
| 711
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ ( nn.Module):
    """Small Linear → BatchNorm → Linear fixture module for the hook tests.

    NOTE(review): this block was damaged by automated renaming —
    - both ``__init__`` assignments target the local ``lowerCamelCase``
      instead of ``self`` attributes, so the submodules referenced by
      ``forward`` (``self.lineara``, ``self.batchnorm``) are never set;
    - the two original linear layers (presumably ``linear1``/``linear2``)
      were collapsed into the single name ``lineara``;
    - ``nn.BatchNormad`` does not exist in torch — presumably
      ``nn.BatchNorm1d`` — TODO confirm against the upstream test.
    Kept byte-identical because the sibling test class references
    ``model.lineara`` / ``model.batchnorm`` under these mangled names.
    """

    def __init__( self: Dict )-> Dict:
        super().__init__()
        lowerCamelCase : Tuple = nn.Linear(3 , 4 )
        lowerCamelCase : Optional[Any] = nn.BatchNormad(4 )
        lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 )

    def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]:
        # forward: linear -> batchnorm -> linear (mangled method name)
        return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A__ ( __lowercase):
    """Hook that increments the first positional argument before forward.

    The original signature ``(__a, *__a, **__a)`` was a SyntaxError
    (duplicate argument names); restored to the conventional
    ``(module, *args, **kwargs)`` pre-forward hook shape.
    """

    def a__(self, module, *args, **kwargs):
        # Shift the first positional argument up by one; pass the rest through.
        return (args[0] + 1,) + args[1:], kwargs
class A__ ( __lowercase):
    """Hook that adds one to the module output after forward.

    The original signature ``(__a, __a)`` was a SyntaxError (duplicate
    argument names); restored to the conventional ``(module, output)``
    post-forward hook shape.
    """

    def a__(self, module, output):
        return output + 1
class A__ ( unittest.TestCase):
    """Tests for accelerate's module hooks: add/remove, chaining, pre/post
    forward mutation, no-grad, and AlignDevicesHook device placement/offload.

    NOTE(review): this block was damaged by automated renaming — every test
    method carries the same name ``a__`` (each definition shadows the
    previous, so the runner only discovers the last), and the bodies bind
    results to the local ``lowerCamelCase`` while later lines reference the
    lost originals (``test_model``, ``model``, ``x``, ``output`` …) or the
    undefined ``__a``. Kept byte-identical; the comments record intent only.
    """

    # Adding then removing a plain ModelHook must leave forward untouched.
    def a__ ( self: int )-> str:
        lowerCamelCase : List[str] = ModelForTest()
        lowerCamelCase : Dict = ModelHook()
        add_hook_to_module(__a , __a )
        self.assertEqual(test_model._hf_hook , __a )
        self.assertTrue(hasattr(__a , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__a )
        self.assertFalse(hasattr(__a , """_hf_hook""" ) )
        self.assertFalse(hasattr(__a , """_old_forward""" ) )

    # append=True should wrap the two hooks into a SequentialHook.
    def a__ ( self: int )-> str:
        lowerCamelCase : List[str] = ModelForTest()
        lowerCamelCase : Union[str, Any] = ModelHook()
        add_hook_to_module(__a , __a )
        add_hook_to_module(__a , __a , append=__a )
        self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(__a , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__a )
        self.assertFalse(hasattr(__a , """_hf_hook""" ) )
        self.assertFalse(hasattr(__a , """_old_forward""" ) )

    # A pre-forward hook shifts the input; re-adding replaces, Sequential chains.
    def a__ ( self: List[Any] )-> List[str]:
        lowerCamelCase : str = ModelForTest()
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : Union[str, Any] = test_model(x + 1 )
        lowerCamelCase : Optional[int] = test_model(x + 2 )
        lowerCamelCase : List[Any] = PreForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[int] = test_model(__a )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCamelCase : Dict = PreForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Tuple = test_model(__a )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[Any] = test_model(__a )
        assert torch.allclose(__a , __a , atol=1e-5 )

    # A post-forward hook bumps the output; same replace-vs-chain semantics.
    def a__ ( self: Any )-> Optional[int]:
        lowerCamelCase : str = ModelForTest()
        lowerCamelCase : List[str] = torch.randn(2 , 3 )
        lowerCamelCase : int = test_model(__a )
        lowerCamelCase : Dict = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Tuple = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCamelCase : str = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[Any] = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(__a , __a )
        lowerCamelCase : str = test_model(__a )
        assert torch.allclose(__a , output + 2 , atol=1e-5 )

    # The hook's no_grad flag must drop requires_grad from the output.
    def a__ ( self: int )-> Dict:
        lowerCamelCase : List[Any] = ModelForTest()
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
        lowerCamelCase : List[str] = test_model(__a )
        lowerCamelCase : Any = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : str = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        lowerCamelCase : Optional[int] = True
        lowerCamelCase : Optional[int] = test_model(__a )
        self.assertFalse(outputa.requires_grad )

    # Each submodule pinned to a different GPU; io_same_device moves output back.
    @require_multi_gpu
    def a__ ( self: List[str] )-> Union[str, Any]:
        lowerCamelCase : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        lowerCamelCase : str = torch.randn(2 , 3 )
        lowerCamelCase : Dict = model(__a )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
        lowerCamelCase : str = model(__a )
        self.assertEqual(output.device , torch.device(0 ) )

    # offload=True parks parameters on meta; removing hooks restores them.
    def a__ ( self: List[str] )-> Tuple:
        lowerCamelCase : Union[str, Any] = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
        lowerCamelCase : Optional[Any] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        lowerCamelCase : Any = {
            """execution_device""": 0 if torch.cuda.is_available() else """cpu""",
            """offload""": True,
            """offload_buffers""": True,
        }
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : int = torch.randn(2 , 3 )
        lowerCamelCase : Optional[int] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )

    # attach_align_device_hook over the whole model, with and without buffers.
    def a__ ( self: Any )-> List[str]:
        lowerCamelCase : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(__a , execution_device=__a , offload=__a )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(__a )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : Optional[Any] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
        lowerCamelCase : int = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )

    # Same as above but offloading through an explicit state-dict weights_map.
    def a__ ( self: Optional[Any] )-> List[Any]:
        lowerCamelCase : List[Any] = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(
            __a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(__a )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : int = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : Tuple = torch.randn(2 , 3 )
        lowerCamelCase : Any = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 42
| 0
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Tuple = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original MobileViTV2 YAML config into a flat Namespace.

    Nested YAML keys are flattened with '.' separators (e.g.
    ``model.classification.name``) and set as attributes on the Namespace.
    """
    print("""Loading config file...""")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten a nested mapping into dotted keys.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, """r""") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            # best-effort: log and return the (possibly empty) namespace
            logger.error("""Error while loading config file: {}. Error message: {}""".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTV2 HF config for the given task from the original YAML.

    Args:
        task_name: one of the supported task identifiers (see the CLI choices).
        orig_cfg_file: path to the original model's YAML config.

    Returns:
        A populated ``MobileViTVaConfig``.
    """
    config = MobileViTVaConfig()
    is_segmentation_model = False

    # dataset
    if task_name.startswith("""imagenet1k_"""):
        config.num_labels = 1000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-1k-id2label.json"""
    elif task_name.startswith("""imagenet21k_to_1k_"""):
        config.num_labels = 21000
        if int(task_name.strip().split("""_""")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = """imagenet-22k-id2label.json"""
    elif task_name.startswith("""ade20k_"""):
        config.num_labels = 151
        config.image_size = 512
        filename = """ade20k-id2label.json"""
        is_segmentation_model = True
    elif task_name.startswith("""voc_"""):
        config.num_labels = 21
        config.image_size = 512
        filename = """pascal-voc-id2label.json"""
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, """model.classification.name""", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, """model.classification.mitv2.width_multiplier""", 1.0)
    assert (
        getattr(orig_config, """model.classification.mitv2.attn_norm_layer""", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, """model.classification.activation.name""", """swish""")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, """model.segmentation.output_stride""", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, """model.segmentation.deeplabv3.aspp_rates""", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, """model.segmentation.deeplabv3.aspp_out_channels""", 512)
            config.aspp_dropout_prob = getattr(orig_config, """model.segmentation.deeplabv3.aspp_dropout""", 0.1)

    # id2label
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}

    return config
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` to key ``new`` in ``dct``."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Compute (old_key, new_key) pairs mapping original MobileViTV2 checkpoint
    keys onto the Hugging Face module naming scheme.

    Args:
        state_dict: the original checkpoint dict (only its keys are read).
        base_model: if True, omit the ``mobilevitv2.`` prefix on renamed keys.

    Returns:
        List of ``(old_key, new_key)`` tuples, one per key in ``state_dict``.
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # strip the original "encoder." prefix, if present
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(""".block.""", """.""")
        if ".conv." in k:
            k_new = k_new.replace(""".conv.""", """.convolution.""")
        if ".norm." in k:
            k_new = k_new.replace(""".norm.""", """.normalization.""")
        if "conv_1." in k:
            k_new = k_new.replace("""conv_1.""", F'{model_prefix}conv_stem.')
        for i in [1, 2]:
            if F'layer_{i}.' in k:
                k_new = k_new.replace(F'layer_{i}.', F'{model_prefix}encoder.layer.{i-1}.layer.')
        if ".exp_1x1." in k:
            k_new = k_new.replace(""".exp_1x1.""", """.expand_1x1.""")
        if ".red_1x1." in k:
            k_new = k_new.replace(""".red_1x1.""", """.reduce_1x1.""")
        # stages 3-5 contain a downsampling layer plus the MobileViT block
        for i in [3, 4, 5]:
            if F'layer_{i}.0.' in k:
                k_new = k_new.replace(F'layer_{i}.0.', F'{model_prefix}encoder.layer.{i-1}.downsampling_layer.')
            if F'layer_{i}.1.local_rep.0.' in k:
                k_new = k_new.replace(F'layer_{i}.1.local_rep.0.', F'{model_prefix}encoder.layer.{i-1}.conv_kxk.')
            if F'layer_{i}.1.local_rep.1.' in k:
                k_new = k_new.replace(F'layer_{i}.1.local_rep.1.', F'{model_prefix}encoder.layer.{i-1}.conv_1x1.')
        # transformer sub-layers per stage; the last global_rep entry is a layernorm
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F'layer_{i}.1.global_rep.{j}.' in k:
                    k_new = k_new.replace(
                        F'layer_{i}.1.global_rep.{j}.', F'{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.')
                if F'layer_{i}.1.global_rep.{j+1}.' in k:
                    k_new = k_new.replace(
                        F'layer_{i}.1.global_rep.{j+1}.', F'{model_prefix}encoder.layer.{i-1}.layernorm.')
            if F'layer_{i}.1.conv_proj.' in k:
                k_new = k_new.replace(F'layer_{i}.1.conv_proj.', F'{model_prefix}encoder.layer.{i-1}.conv_projection.')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("""pre_norm_attn.0.""", """layernorm_before.""")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("""pre_norm_attn.1.""", """attention.""")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("""pre_norm_ffn.0.""", """layernorm_after.""")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("""pre_norm_ffn.1.""", """ffn.conv1.""")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("""pre_norm_ffn.3.""", """ffn.conv2.""")
        if "classifier.1." in k:
            k_new = k_new.replace("""classifier.1.""", """classifier.""")
        if "seg_head." in k:
            k_new = k_new.replace("""seg_head.""", """segmentation_head.""")
        if ".aspp_layer." in k:
            k_new = k_new.replace(""".aspp_layer.""", """.""")
        if ".aspp_pool." in k:
            k_new = k_new.replace(""".aspp_pool.""", """.""")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head keys (``seg_head.aux_head.*``) from the state dict
    in place; they have no counterpart in the HF model."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("""seg_head.aux_head."""):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    """Download and return the standard COCO cats test image used for
    sanity-checking model outputs."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to the HF format and save it.

    Args:
        task_name: task identifier selecting classification vs segmentation.
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original YAML config.
        pytorch_dump_folder_path: output directory for model + processor.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="""cpu""")

    # load huggingface model
    if task_name.startswith("""ade20k_""") or task_name.startswith("""voc_"""):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("""imagenet"""):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("""Predicted class:""", model.config.id2label[predicted_class_idx])
        if task_name.startswith("""imagenet1k_256""") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'Saving model {task_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the task and paths, then run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task',
        default='imagenet1k_256',
        type=str,
        help=(
            'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
            '\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
        ),
        choices=[
            'imagenet1k_256',
            'imagenet1k_384',
            'imagenet21k_to_1k_256',
            'imagenet21k_to_1k_384',
            'ade20k_deeplabv3',
            'voc_deeplabv3',
        ],
    )
    parser.add_argument(
        '--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
    )
    parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 712
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy import structure: maps submodule name -> public names it provides.
_import_structure = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}

# Torch-backed modeling classes are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_encodec'] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy proxy is used.
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
# Model-type -> Flax class-name lookup tables, later wrapped into lazy mappings.
# NOTE(review): the _LazyAutoMapping calls below reference FLAX_MODEL_*_MAPPING_NAMES
# identifiers while the dicts here are bound to `__lowerCamelCase` -- names appear
# to have been rewritten mechanically; verify against the canonical module.
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :str = OrderedDict(
    [
        # Base model mapping
        ('albert', 'FlaxAlbertModel'),
        ('bart', 'FlaxBartModel'),
        ('beit', 'FlaxBeitModel'),
        ('bert', 'FlaxBertModel'),
        ('big_bird', 'FlaxBigBirdModel'),
        ('blenderbot', 'FlaxBlenderbotModel'),
        ('blenderbot-small', 'FlaxBlenderbotSmallModel'),
        ('clip', 'FlaxCLIPModel'),
        ('distilbert', 'FlaxDistilBertModel'),
        ('electra', 'FlaxElectraModel'),
        ('gpt-sw3', 'FlaxGPT2Model'),
        ('gpt2', 'FlaxGPT2Model'),
        ('gpt_neo', 'FlaxGPTNeoModel'),
        ('gptj', 'FlaxGPTJModel'),
        ('longt5', 'FlaxLongT5Model'),
        ('marian', 'FlaxMarianModel'),
        ('mbart', 'FlaxMBartModel'),
        ('mt5', 'FlaxMT5Model'),
        ('opt', 'FlaxOPTModel'),
        ('pegasus', 'FlaxPegasusModel'),
        ('regnet', 'FlaxRegNetModel'),
        ('resnet', 'FlaxResNetModel'),
        ('roberta', 'FlaxRobertaModel'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
        ('roformer', 'FlaxRoFormerModel'),
        ('t5', 'FlaxT5Model'),
        ('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
        ('vit', 'FlaxViTModel'),
        ('wav2vec2', 'FlaxWav2Vec2Model'),
        ('whisper', 'FlaxWhisperModel'),
        ('xglm', 'FlaxXGLMModel'),
        ('xlm-roberta', 'FlaxXLMRobertaModel'),
    ]
)
__lowerCamelCase :Tuple = OrderedDict(
    [
        # Model for pre-training mapping
        ('albert', 'FlaxAlbertForPreTraining'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForPreTraining'),
        ('big_bird', 'FlaxBigBirdForPreTraining'),
        ('electra', 'FlaxElectraForPreTraining'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
        ('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
__lowerCamelCase :Dict = OrderedDict(
    [
        # Model for Masked LM mapping
        ('albert', 'FlaxAlbertForMaskedLM'),
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('bert', 'FlaxBertForMaskedLM'),
        ('big_bird', 'FlaxBigBirdForMaskedLM'),
        ('distilbert', 'FlaxDistilBertForMaskedLM'),
        ('electra', 'FlaxElectraForMaskedLM'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('roberta', 'FlaxRobertaForMaskedLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
        ('roformer', 'FlaxRoFormerForMaskedLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
    ]
)
__lowerCamelCase :List[Any] = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('bart', 'FlaxBartForConditionalGeneration'),
        ('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
        ('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
        ('encoder-decoder', 'FlaxEncoderDecoderModel'),
        ('longt5', 'FlaxLongT5ForConditionalGeneration'),
        ('marian', 'FlaxMarianMTModel'),
        ('mbart', 'FlaxMBartForConditionalGeneration'),
        ('mt5', 'FlaxMT5ForConditionalGeneration'),
        ('pegasus', 'FlaxPegasusForConditionalGeneration'),
        ('t5', 'FlaxT5ForConditionalGeneration'),
    ]
)
__lowerCamelCase :Dict = OrderedDict(
    [
        # Model for Image-classsification
        ('beit', 'FlaxBeitForImageClassification'),
        ('regnet', 'FlaxRegNetForImageClassification'),
        ('resnet', 'FlaxResNetForImageClassification'),
        ('vit', 'FlaxViTForImageClassification'),
    ]
)
__lowerCamelCase :List[str] = OrderedDict(
    [
        # Model for Vision-to-Text (image captioning style) mapping
        ('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
    ]
)
__lowerCamelCase :Optional[int] = OrderedDict(
    [
        # Model for Causal LM mapping
        ('bart', 'FlaxBartForCausalLM'),
        ('bert', 'FlaxBertForCausalLM'),
        ('big_bird', 'FlaxBigBirdForCausalLM'),
        ('electra', 'FlaxElectraForCausalLM'),
        ('gpt-sw3', 'FlaxGPT2LMHeadModel'),
        ('gpt2', 'FlaxGPT2LMHeadModel'),
        ('gpt_neo', 'FlaxGPTNeoForCausalLM'),
        ('gptj', 'FlaxGPTJForCausalLM'),
        ('opt', 'FlaxOPTForCausalLM'),
        ('roberta', 'FlaxRobertaForCausalLM'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
        ('xglm', 'FlaxXGLMForCausalLM'),
        ('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
    ]
)
__lowerCamelCase :Optional[Any] = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('albert', 'FlaxAlbertForSequenceClassification'),
        ('bart', 'FlaxBartForSequenceClassification'),
        ('bert', 'FlaxBertForSequenceClassification'),
        ('big_bird', 'FlaxBigBirdForSequenceClassification'),
        ('distilbert', 'FlaxDistilBertForSequenceClassification'),
        ('electra', 'FlaxElectraForSequenceClassification'),
        ('mbart', 'FlaxMBartForSequenceClassification'),
        ('roberta', 'FlaxRobertaForSequenceClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
        ('roformer', 'FlaxRoFormerForSequenceClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
    ]
)
__lowerCamelCase :Any = OrderedDict(
    [
        # Model for Question Answering mapping
        ('albert', 'FlaxAlbertForQuestionAnswering'),
        ('bart', 'FlaxBartForQuestionAnswering'),
        ('bert', 'FlaxBertForQuestionAnswering'),
        ('big_bird', 'FlaxBigBirdForQuestionAnswering'),
        ('distilbert', 'FlaxDistilBertForQuestionAnswering'),
        ('electra', 'FlaxElectraForQuestionAnswering'),
        ('mbart', 'FlaxMBartForQuestionAnswering'),
        ('roberta', 'FlaxRobertaForQuestionAnswering'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
        ('roformer', 'FlaxRoFormerForQuestionAnswering'),
        ('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
    ]
)
__lowerCamelCase :Optional[int] = OrderedDict(
    [
        # Model for Token Classification mapping
        ('albert', 'FlaxAlbertForTokenClassification'),
        ('bert', 'FlaxBertForTokenClassification'),
        ('big_bird', 'FlaxBigBirdForTokenClassification'),
        ('distilbert', 'FlaxDistilBertForTokenClassification'),
        ('electra', 'FlaxElectraForTokenClassification'),
        ('roberta', 'FlaxRobertaForTokenClassification'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
        ('roformer', 'FlaxRoFormerForTokenClassification'),
        ('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
    ]
)
__lowerCamelCase :Union[str, Any] = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('albert', 'FlaxAlbertForMultipleChoice'),
        ('bert', 'FlaxBertForMultipleChoice'),
        ('big_bird', 'FlaxBigBirdForMultipleChoice'),
        ('distilbert', 'FlaxDistilBertForMultipleChoice'),
        ('electra', 'FlaxElectraForMultipleChoice'),
        ('roberta', 'FlaxRobertaForMultipleChoice'),
        ('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
        ('roformer', 'FlaxRoFormerForMultipleChoice'),
        ('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
    ]
)
__lowerCamelCase :Optional[Any] = OrderedDict(
    [
        # Model for Next Sentence Prediction mapping
        ('bert', 'FlaxBertForNextSentencePrediction'),
    ]
)
__lowerCamelCase :List[Any] = OrderedDict(
    [
        # Model for Speech Seq2Seq mapping
        ('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
        ('whisper', 'FlaxWhisperForConditionalGeneration'),
    ]
)
__lowerCamelCase :Union[str, Any] = OrderedDict(
    [
        # Model for Audio Classification mapping
        ('whisper', 'FlaxWhisperForAudioClassification'),
    ]
)
# Lazy mappings resolve the class-name strings above into actual classes on first access.
__lowerCamelCase :Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
__lowerCamelCase :List[str] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
__lowerCamelCase :List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
__lowerCamelCase :Any = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
__lowerCamelCase :Any = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase :Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
__lowerCamelCase :Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
__lowerCamelCase :Tuple = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase :Any = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
__lowerCamelCase :Optional[int] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
__lowerCamelCase :Dict = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
__lowerCamelCase :List[Any] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
__lowerCamelCase :Optional[int] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
__lowerCamelCase :List[str] = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
# Auto-model facade classes: each binds one lazy mapping and is then decorated
# with auto_class_update to receive from_pretrained/from_config docstrings.
# NOTE(review): every class here is named `A__` while auto_class_update is called
# with FlaxAutoModel* identifiers -- names look mechanically rewritten; verify.
class A__ ( _BaseAutoModelClass):
    """Auto class dispatching to the Flax base model for a given config."""
    snake_case__ : Optional[Any] =FLAX_MODEL_MAPPING


__lowerCamelCase :Dict = auto_class_update(FlaxAutoModel)


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax pre-training heads."""
    snake_case__ : List[Any] =FLAX_MODEL_FOR_PRETRAINING_MAPPING


__lowerCamelCase :Any = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax causal language modeling heads."""
    snake_case__ : List[Any] =FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


__lowerCamelCase :List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax masked language modeling heads."""
    snake_case__ : Optional[int] =FLAX_MODEL_FOR_MASKED_LM_MAPPING


__lowerCamelCase :Any = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax sequence-to-sequence language modeling heads."""
    snake_case__ : Optional[Any] =FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


__lowerCamelCase :List[Any] = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax sequence classification heads."""
    snake_case__ : int =FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


__lowerCamelCase :Optional[int] = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax extractive question answering heads."""
    snake_case__ : Dict =FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


__lowerCamelCase :Tuple = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax token classification heads."""
    snake_case__ : List[Any] =FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


__lowerCamelCase :List[Any] = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax multiple choice heads."""
    snake_case__ : Optional[Any] =FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


__lowerCamelCase :Dict = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax next-sentence prediction heads."""
    snake_case__ : Dict =FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


__lowerCamelCase :List[str] = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax image classification heads."""
    snake_case__ : List[Any] =FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


__lowerCamelCase :Any = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax vision-to-text models."""
    snake_case__ : Dict =FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


__lowerCamelCase :Any = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')


class A__ ( _BaseAutoModelClass):
    """Auto class for Flax speech sequence-to-sequence models."""
    snake_case__ : List[str] =FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


__lowerCamelCase :Any = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 713
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Helper that builds tiny ConvNext configs/inputs and runs per-head checks.

    NOTE(review): the parameters below are all spelled `__a` (duplicate names);
    this looks like mechanical renaming of distinct keyword arguments -- the
    attribute assignments in the body show the intended order.
    """

    def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
        # Store the test hyper-parameters; small values keep the forward pass fast.
        lowerCamelCase : Optional[int] = parent
        lowerCamelCase : Optional[int] = batch_size
        lowerCamelCase : Any = image_size
        lowerCamelCase : Tuple = num_channels
        lowerCamelCase : str = num_stages
        lowerCamelCase : List[str] = hidden_sizes
        lowerCamelCase : str = depths
        lowerCamelCase : Dict = is_training
        lowerCamelCase : Optional[Any] = use_labels
        lowerCamelCase : List[str] = intermediate_size
        lowerCamelCase : List[str] = hidden_act
        lowerCamelCase : List[str] = num_labels
        lowerCamelCase : Union[str, Any] = initializer_range
        lowerCamelCase : List[Any] = out_features
        lowerCamelCase : Optional[Any] = out_indices
        lowerCamelCase : int = scope

    def a__ ( self: str )-> Optional[Any]:
        # Random pixel values (and labels when use_labels) plus a matching config.
        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase : Dict = None
        if self.use_labels:
            lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
        lowerCamelCase : Any = self.get_config()
        return config, pixel_values, labels

    def a__ ( self: Dict )-> Union[str, Any]:
        # Build a ConvNextConfig from the stored hyper-parameters.
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )

    def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
        # Base model: last hidden state is spatially downsampled by 32.
        lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Any = model(__a )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
        # Classification head: logits have shape (batch_size, num_labels).
        lowerCamelCase : str = ConvNextForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Any = model(__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
        # Backbone: check feature maps/channels both with explicit out_features
        # and with out_features=None (falls back to the last stage only).
        lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        lowerCamelCase : Tuple = None
        lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[Any] = model(__a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def a__ ( self: Optional[Any] )-> Any:
        # Repackage prepared inputs into the (config, inputs_dict) shape the
        # common test mixin expects.
        lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
        lowerCamelCase : int = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Common-suite tests for ConvNext (model, classification head, backbone)."""

    # All ConvNext heads exercised by the common model tests.
    snake_case__ : int =(
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline test mixin.
    snake_case__ : str =(
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    # Feature flags for the common suite (no attentions / input embeds / head masking).
    snake_case__ : Union[str, Any] =True
    snake_case__ : Optional[int] =False
    snake_case__ : Tuple =False
    snake_case__ : Union[str, Any] =False
    snake_case__ : Tuple =False

    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        lowerCamelCase : Tuple = ConvNextModelTester(self )
        lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )

    def a__ ( self: Optional[int] )-> Dict:
        # Run the full battery of config round-trip checks.
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def a__ ( self: Optional[int] )-> Optional[Any]:
        return

    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def a__ ( self: int )-> Dict:
        pass

    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def a__ ( self: Dict )-> Optional[Any]:
        pass

    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def a__ ( self: int )-> List[Any]:
        pass

    def a__ ( self: Union[str, Any] )-> int:
        # The first positional forward argument must be `pixel_values`.
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : Any = model_class(__a )
            lowerCamelCase : str = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
            lowerCamelCase : List[str] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )

    def a__ ( self: Optional[int] )-> str:
        lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def a__ ( self: str )-> int:
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__a )

    def a__ ( self: int )-> Optional[int]:
        # Hidden states: one per stage plus the stem, at 1/4 input resolution.
        def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
            lowerCamelCase : str = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
            lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            lowerCamelCase : Optional[int] = self.model_tester.num_stages
            self.assertEqual(len(__a ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )

        lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : List[Any] = True
            check_hidden_states_output(__a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : Tuple = True
            check_hidden_states_output(__a , __a , __a )

    def a__ ( self: Dict )-> Optional[Any]:
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )

    @slow
    def a__ ( self: Optional[Any] )-> Tuple:
        # Smoke-test loading the first published checkpoint from the hub.
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
            self.assertIsNotNone(__a )
def snake_case ( ) -> "Image.Image":
    """Load the standard COCO cats fixture image used by the integration tests below.

    Returns:
        The opened PIL image (fix: the original bound the image to one local name
        but returned the undefined name ``image``, raising NameError; the return
        annotation ``Optional[int]`` was also wrong for an image loader).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
    """Integration test: run the published convnext-tiny checkpoint on a real image."""

    @cached_property
    def a__ ( self: Dict )-> Union[str, Any]:
        # Lazily instantiate the image processor (only when vision deps exist).
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None

    @slow
    def a__ ( self: List[str] )-> Dict:
        lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
        lowerCamelCase : Dict = self.default_image_processor
        lowerCamelCase : Union[str, Any] = prepare_img()
        lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
        # forward pass
        with torch.no_grad():
            lowerCamelCase : Any = model(**__a )
        # verify the logits
        lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , __a )
        # Reference logits recorded from the original checkpoint; compared with
        # a loose tolerance to absorb hardware/numeric differences.
        lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
    """Backbone-specific common tests for ConvNext (via BackboneTesterMixin)."""

    # Only the backbone class participates; config and pooling flag for the mixin.
    snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
    snake_case__ : Optional[Any] =ConvNextConfig
    snake_case__ : Optional[Any] =False

    def a__ ( self: List[str] )-> int:
        lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 0
|
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
__lowerCamelCase :Optional[Any] = '\nimport os\n'
__lowerCamelCase :List[str] = '\ndef foo():\n import os\n return False\n'
__lowerCamelCase :List[str] = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
__lowerCamelCase :Dict = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
__lowerCamelCase :List[str] = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
__lowerCamelCase :int = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
__lowerCamelCase :Optional[Any] = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
__lowerCamelCase :Optional[int] = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
__lowerCamelCase :Any = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
__lowerCamelCase :int = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
__lowerCamelCase :Tuple = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("""case""" , __lowerCamelCase )
def snake_case ( case: str , tmp_path ) -> None:
    """Check that get_imports reports only top-level, unguarded imports.

    Fixes: the original signature declared two parameters with the same name
    (a SyntaxError), and the fixture parameter must be called ``tmp_path`` for
    pytest to inject the built-in temporary-directory fixture.

    Args:
        case: Python source snippet to parse (parametrized from the list above).
        tmp_path: pytest's per-test temporary directory fixture.
    """
    lowerCamelCase : Dict = os.path.join(tmp_path , """test_file.py""" )
    with open(lowerCamelCase , """w""" ) as _tmp_file:
        _tmp_file.write(case )

    lowerCamelCase : Optional[Any] = get_imports(lowerCamelCase )
    # Guarded (try/except) and function-local imports must be filtered out.
    assert lowerCamelCase == ["os"]
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for the REALM family.
__lowerCamelCase :List[str] = {
    'google/realm-cc-news-pretrained-embedder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-encoder': (
        'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-scorer': (
        'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
    ),
    'google/realm-cc-news-pretrained-openqa': (
        # Fix: was '/aresolve/main/...', which is not a valid Hub resolve URL.
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
    ),
    'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
    'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
    'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
    'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
    # See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
    """Configuration for REALM models (embedder/encoder/scorer/reader/retriever).

    Holds the shared BERT-style encoder hyper-parameters plus the reader- and
    retrieval-specific settings used by the REALM open-domain QA pipeline.
    """

    snake_case__ : Optional[Any] ='''realm'''

    def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
        # NOTE(review): all parameters are spelled `__a` (duplicate names); the
        # attribute assignments below show the intended argument order.
        super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )

        # Common config
        lowerCamelCase : Optional[Any] = vocab_size
        lowerCamelCase : str = max_position_embeddings
        lowerCamelCase : Dict = hidden_size
        lowerCamelCase : Dict = retriever_proj_size
        lowerCamelCase : Optional[Any] = num_hidden_layers
        lowerCamelCase : List[str] = num_attention_heads
        lowerCamelCase : Tuple = num_candidates
        lowerCamelCase : int = intermediate_size
        lowerCamelCase : Dict = hidden_act
        lowerCamelCase : List[str] = hidden_dropout_prob
        lowerCamelCase : Dict = attention_probs_dropout_prob
        lowerCamelCase : Optional[int] = initializer_range
        lowerCamelCase : Dict = type_vocab_size
        lowerCamelCase : Optional[Any] = layer_norm_eps

        # Reader config
        lowerCamelCase : List[str] = span_hidden_size
        lowerCamelCase : Dict = max_span_width
        lowerCamelCase : Optional[Any] = reader_layer_norm_eps
        lowerCamelCase : Optional[int] = reader_beam_size
        lowerCamelCase : List[Any] = reader_seq_len

        # Retrieval config
        lowerCamelCase : int = num_block_records
        lowerCamelCase : Dict = searcher_beam_size
| 42
| 0
|
"""simple docstring"""
def snake_case ( UpperCamelCase__ : str ) -> str:
    """Convert a string of binary digits to its octal representation.

    Fixes the collapsed local names in the original: the padding loop measured
    the (never reassigned) parameter so it never terminated, and the per-group
    loop enumerated the whole input instead of the current 3-bit group.

    Args:
        UpperCamelCase__: Binary string such as ``"101010"``.

    Returns:
        Octal string with one digit per 3-bit group (input is left-padded
        with zeros to a multiple of three bits).

    Raises:
        ValueError: If the string is empty or contains characters other than 0/1.
    """
    # Validation order matches the original: content check first, then emptiness
    # (all() over an empty string is True, so "" falls through to the second check).
    if not all(char in """01""" for char in UpperCamelCase__ ):
        raise ValueError("""Non-binary value was passed to the function""" )
    if not UpperCamelCase__:
        raise ValueError("""Empty string was passed to the function""" )

    # Left-pad so the length is a multiple of 3 (one octal digit per 3 bits).
    bin_string = UpperCamelCase__
    while len(bin_string ) % 3 != 0:
        bin_string = """0""" + bin_string

    oct_string = """"""
    for index in range(0 , len(bin_string ) , 3 ):
        group = bin_string[index : index + 3]
        oct_val = 0
        for bit_pos, bit in enumerate(group ):
            # Most-significant bit of the group carries weight 2**2.
            oct_val += int(2 ** (2 - bit_pos) ) * int(bit )
        oct_string += str(oct_val )
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 715
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL for GLPN.
__lowerCamelCase :Any = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class A__ ( __lowercase):
    """Configuration for GLPN (SegFormer-style encoder + depth-estimation decoder)."""

    snake_case__ : Tuple ='''glpn'''

    def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
        # NOTE(review): parameters are all spelled `__a` (duplicate names); the
        # attribute assignments below show the intended argument order.
        super().__init__(**__a )

        # Per-stage encoder hyper-parameters (lists are indexed by encoder block).
        lowerCamelCase : Dict = num_channels
        lowerCamelCase : Any = num_encoder_blocks
        lowerCamelCase : Dict = depths
        lowerCamelCase : List[str] = sr_ratios
        lowerCamelCase : Dict = hidden_sizes
        lowerCamelCase : Tuple = patch_sizes
        lowerCamelCase : Optional[int] = strides
        lowerCamelCase : Optional[Any] = mlp_ratios
        lowerCamelCase : Union[str, Any] = num_attention_heads
        lowerCamelCase : List[str] = hidden_act
        lowerCamelCase : Any = hidden_dropout_prob
        lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        lowerCamelCase : List[Any] = initializer_range
        lowerCamelCase : Dict = drop_path_rate
        lowerCamelCase : Any = layer_norm_eps
        # Decoder / depth head settings.
        lowerCamelCase : Optional[Any] = decoder_hidden_size
        lowerCamelCase : Tuple = max_depth
        lowerCamelCase : Optional[Any] = head_in_index
| 42
| 0
|
"""simple docstring"""
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# File name under which scheduler configurations are saved/loaded.
__lowerCamelCase :Optional[Any] = 'scheduler_config.json'


class A__ ( __lowercase):
    """Enumeration of the Flax Karras-style diffusion schedulers (values are ids)."""

    snake_case__ : List[Any] =1
    snake_case__ : Tuple =2
    snake_case__ : Dict =3
    snake_case__ : Tuple =4
    snake_case__ : Tuple =5
@dataclass
class A__ ( __lowercase):
    """Base scheduler output: carries the predicted previous sample."""

    # prev_sample: denoised sample for the previous timestep.
    snake_case__ : jnp.ndarray
class A__ :
    """Mixin providing save/load and compatibility helpers for Flax schedulers."""

    # Config file name, ignored-for-config kwargs, compatible-class list, hub flag.
    snake_case__ : Tuple =SCHEDULER_CONFIG_NAME
    snake_case__ : Union[str, Any] =['''dtype''']
    snake_case__ : Union[str, Any] =[]
    snake_case__ : Any =True

    @classmethod
    def a__ ( cls: Union[str, Any] , __a: Dict[str, Any] = None , __a: Optional[str] = None , __a: Dict=False , **__a: Any , )-> Optional[int]:
        # Instantiate a scheduler from a saved config; schedulers that define
        # create_state() additionally return their initial mutable state.
        lowerCamelCase : Optional[Any] = cls.load_config(
            pretrained_model_name_or_path=__a , subfolder=__a , return_unused_kwargs=__a , **__a , )
        lowerCamelCase : List[Any] = cls.from_config(__a , return_unused_kwargs=__a , **__a )

        if hasattr(__a , """create_state""" ) and getattr(__a , """has_state""" , __a ):
            lowerCamelCase : int = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def a__ ( self: List[Any] , __a: Union[str, os.PathLike] , __a: bool = False , **__a: int )-> Optional[Any]:
        # Persist the scheduler configuration (optionally pushing to the hub).
        self.save_config(save_directory=__a , push_to_hub=__a , **__a )

    @property
    def a__ ( self: str )-> int:
        # Scheduler classes this one can be swapped with.
        return self._get_compatibles()

    @classmethod
    def a__ ( cls: int )-> Union[str, Any]:
        # Resolve the compatible class names (plus this class) to real classes
        # from the package root, skipping any that are unavailable.
        lowerCamelCase : List[Any] = list(set([cls.__name__] + cls._compatibles ) )
        lowerCamelCase : int = importlib.import_module(__name__.split(""".""" )[0] )
        lowerCamelCase : List[str] = [
            getattr(__a , __a ) for c in compatible_classes_str if hasattr(__a , __a )
        ]
        return compatible_classes
def snake_case ( x: jnp.ndarray , shape: Tuple[int] ) -> jnp.ndarray:
    """Broadcast `x` against `shape` from the left by appending singleton axes.

    Fix: the original declared both parameters with the same name, which is a
    SyntaxError; the body shows the intended (array, target-shape) pair.

    Args:
        x: Array with at most ``len(shape)`` dimensions.
        shape: Target shape to broadcast to.

    Returns:
        `x` reshaped with trailing singleton axes and broadcast to `shape`.
    """
    assert len(shape ) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape ) - x.ndim) ) , shape )
def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str=0.9_9_9 , UpperCamelCase__ : Dict=jnp.floataa ) -> jnp.ndarray:
def alpha_bar(UpperCamelCase__ : List[Any] ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
lowerCamelCase : Optional[Any] = []
for i in range(UpperCamelCase__ ):
lowerCamelCase : Tuple = i / num_diffusion_timesteps
lowerCamelCase : str = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(UpperCamelCase__ ) / alpha_bar(UpperCamelCase__ ) , UpperCamelCase__ ) )
return jnp.array(UpperCamelCase__ , dtype=UpperCamelCase__ )
@flax.struct.dataclass
class A__ :
    """Immutable container for the beta/alpha tables shared by the flax schedulers.

    NOTE(review): the original declared the same field name three times; the
    classmethod below constructs the instance as cls(alphas=..., betas=...,
    alphas_cumprod=...) and downstream code reads `state.alphas_cumprod`, so
    the three distinct fields are restored under those names.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def a__ ( cls , scheduler )-> "A__":
        """Build the state from `scheduler.config` (trained betas or a named schedule).

        NOTE(review): the original's parameter was named `__a` while the body
        referenced the undefined name `scheduler`; the parameter is renamed to match.
        """
        config = scheduler.config
        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype)
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            # NOTE(review): this helper exists in this module under a different
            # (obfuscated) name — confirm the reference resolves.
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' )
        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)
        return cls(
            alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod, )
def snake_case ( state , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Return sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t), broadcast to the sample shape.

    `noise` is accepted (and unused) so the three state helpers share one signature.
    NOTE(review): the original declared all four parameters as `UpperCamelCase__`
    (a SyntaxError); names restored from how the body uses them.
    """
    alphas_cumprod = state.alphas_cumprod
    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    # NOTE(review): this broadcast helper is defined in this module under a
    # different (obfuscated) name — confirm the reference resolves.
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)
    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)
    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def snake_case ( state , original_samples: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Forward-diffuse: mix clean samples with noise at the given timesteps.

    NOTE(review): the original declared all four parameters as `UpperCamelCase__`
    (a SyntaxError) and dropped the tuple unpack; both are restored here.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def snake_case ( state , sample: jnp.ndarray , noise: jnp.ndarray , timesteps: jnp.ndarray ):
    """Compute the v-prediction target: sqrt(a)*noise - sqrt(1-a)*sample.

    NOTE(review): the original declared all four parameters as `UpperCamelCase__`
    (a SyntaxError) and dropped the tuple unpack; both are restored here.
    """
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 716
|
"""simple docstring"""
from __future__ import annotations
import math
def snake_case ( u: float , p: int ) -> float:
    """Return u * (u-1) * ... * (u-p+1), the rising product used by Newton's
    forward-difference interpolation.

    NOTE(review): the original declared both parameters as `UpperCamelCase__`
    (a SyntaxError) while the body referenced the undefined name `u`.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def snake_case ( ) -> None:
    """Interactively evaluate Newton's forward-difference interpolation.

    Reads n sample points (x values, then y values) from stdin, builds the
    forward-difference table in-place in `y`, and prints the interpolated value.

    NOTE(review): the original body referenced the undefined name
    `UpperCamelCase__` and dropped several assignment targets; targets are
    restored from how the table is read later in the function.
    """
    n = int(input("""enter the numbers of values: """ ) )
    # n x n forward-difference table; column 0 holds the raw y values.
    y: list[list[float]] = []
    for _ in range(n):
        y.append([] )
    for i in range(n):
        for j in range(n):
            y[i].append(j )
            y[i][j] = 0
    print("""enter the values of parameters in a list: """ )
    x = list(map(float, input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        # NOTE(review): `ucal` is defined in this module under a different
        # (obfuscated) name — confirm the reference resolves.
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F'the value at {value} is {summ}' )
# Script entry point.
# NOTE(review): this calls main(), but the entry function defined above in this
# module carries a different (obfuscated) name — confirm the intended entry point.
if __name__ == "__main__":
    main()
| 42
| 0
|
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
class A__ ( SequenceFeatureExtractor):
    """Feature extractor that turns raw mono audio into (optionally fused) log-mel spectrograms.

    NOTE(review): the original subclassed an undefined name (the module imports
    SequenceFeatureExtractor, the evident base), declared duplicate `__a`
    parameters (a SyntaxError), and gave all four helper methods the same name
    so later defs shadowed earlier ones while `__call__` invoked
    `self._get_input_mel` etc. Base class, parameter names, and method names
    are restored from the module imports and the internal call sites.
    """

    # Names of the tensors produced by __call__.
    model_input_names = ['input_features', 'is_longer']

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1_024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # One-sided spectrum bin count for the chosen FFT size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        # Maximum number of raw samples processed without truncation.
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # Two filter banks: HTK-scale (fusion path) and Slaney-scale (rand_trunc path).
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="""htk""",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="""slaney""",
            mel_scale="""slaney""",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize this extractor's attributes, dropping the large re-derivable filter banks."""
        output = copy.deepcopy(self.__dict__)
        # NOTE(review): the original discarded this value; storing the class name
        # under "feature_extractor_type" is the evident serialization intent.
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute a dB-scaled log-mel spectrogram, returned as (frames, mel bins)."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, """hann""" ),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="""dB""",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel "fusion" input: one shrunk global view plus three random crops."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink, size=[chunk_frames, 64], mode="""bilinear""", align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding) -> np.array:
        """Truncate/pad one waveform to `max_length` and return (mel features, longer flag)."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented' )
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="""constant""", constant_values=0)
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into `input_features` / `is_longer`."""
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the `sampling_rate` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]
        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]
        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)
        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True
        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]
        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]
        input_features = {"""input_features""": input_mel, """is_longer""": is_longer}
        input_features = BatchFeature(input_features)
        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)
        return input_features
| 717
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase :str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase :Dict = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[str] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__lowerCamelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 718
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def snake_case ( hidden_sizes: int , name: str , config: LevitConfig , save_directory: Path , push_to_hub: bool = True ) -> None:
    """Port one pretrained timm LeViT checkpoint into the HF LeViT model and save it.

    NOTE(review): the original declared every parameter as `UpperCamelCase__`
    (a SyntaxError); names restored from how the body uses them.
    """
    print(F'Converting {name}...' )
    with torch.no_grad():
        # Select the matching timm backbone.
        # NOTE(review): the original passed an undefined name as `pretrained`;
        # True (download released weights) is the evident intent.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""", pretrained=True)
            else:
                from_model = timm.create_model("""levit_128""", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # Positional key-by-key copy: both state dicts enumerate parameters in
        # the same order (validated by the allclose check below).
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        from_logits = from_model(x)
        our_logits = our_model(x).logits
        assert torch.allclose(from_logits, our_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F'Pushed {checkpoint_name}' )
def snake_case ( save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    """Convert one named LeViT variant (or all of them) and return (config, expected_shape).

    NOTE(review): the original declared every parameter as `UpperCamelCase__`
    (a SyntaxError); names restored from how the body uses them.
    """
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    # Partially-applied config constructor shared by all variants below.
    # NOTE(review): the original applied `partial` to an undefined name;
    # LevitConfig (imported at module top) is the evident target, and the
    # id2label/label2id keyword names follow its API.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    # NOTE(review): `convert_weight_and_push` is defined in this module under a
    # different (obfuscated) name — confirm the reference resolves.
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    # NOTE(review): when a single model_name is given, `config` is never bound
    # before this return — latent bug preserved from the original flow; confirm.
    return config, expected_shape
# CLI entry point for the LeViT checkpoint conversion script.
if __name__ == "__main__":
    # NOTE(review): the parser is bound to `__lowerCamelCase` but the
    # add_argument/parse_args calls below reference `parser`, and the parsed
    # namespace is likewise read back as `args` — these names do not line up
    # as written; confirm before shipping.
    __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    # --no-push_to_hub flips the same flag off for local-only conversions.
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    __lowerCamelCase :List[Any] = parser.parse_args()
    __lowerCamelCase :Path = args.pytorch_dump_folder_path
    # Ensure the dump directory exists before conversion writes into it.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    # NOTE(review): `convert_weights_and_push` is defined in this module under a
    # different (obfuscated) name — confirm the reference resolves.
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
__lowerCamelCase :Tuple = '2020.9.26'
__lowerCamelCase :List[str] = 'xcodz-dot, cclaus, dhruvmanila'
def snake_case ( x: float , y: float , z: float , scale: float , distance: float ) -> tuple[float, float]:
    """Project a 3-D point onto 2-D using a simple perspective divide.

    Raises TypeError when any input is not an int/float.
    NOTE(review): the original declared every parameter as `UpperCamelCase__`
    (a SyntaxError); names restored from how the body uses them.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = F'Input values must either be float or int: {list(locals().values() )}'
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def snake_case ( x: float , y: float , z: float , axis: str , angle: float ) -> tuple[float, float, float]:
    """Rotate point (x, y, z) about the given axis ('x', 'y', or 'z') by `angle` degrees.

    Raises TypeError for a non-str axis or non-numeric coordinates/angle, and
    ValueError for an unknown axis.
    NOTE(review): the original declared every parameter as `UpperCamelCase__`
    (a SyntaxError) and dropped the new-coordinate assignment targets; both
    are restored from how the values are returned.
    """
    if not isinstance(axis, str):
        raise TypeError("""Axis must be a str""" )
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            """Input values except axis must either be float or int: """
            F'{list(input_variables.values() )}'
        )
        raise TypeError(msg)
    # NOTE(review): this degree->radian conversion ((angle % 360) / 450 * 180 / pi)
    # is unusual but kept as-is to preserve the original numeric behaviour.
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" )
    return new_x, new_y, new_z
# Self-test entry point: run doctests, then print two demo evaluations.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): these call convert_to_ad() and rotate(), but both functions
    # in this module are defined under different (obfuscated) names — confirm
    # the references resolve.
    print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(f"""{rotate(1.0, 2.0, 3.0, "y", 90.0) = }""")
| 719
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( SchedulerCommonTest):
    """Unit tests for the KDPM2 (k-diffusion) discrete scheduler.

    NOTE(review): the original subclassed an undefined name (SchedulerCommonTest
    is the module's import and the evident base) and gave all seven methods the
    same name, so only the last survived; method and attribute names are
    restored from the internal reads (`self.scheduler_classes`,
    `self.num_inference_steps`, `self.get_scheduler_config`) and the unittest
    `test_` convention.
    """

    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a baseline scheduler config; keyword overrides win."""
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.00_01,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01], [0.00_02, 0.0_02, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        """Run the full denoising loop under v-prediction and pin the output stats."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6_9_3_4e-0_7) < 1e-2
            assert abs(result_mean.item() - 6.1_1_1_2e-1_0) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7) < 1e-2
            assert abs(result_mean.item() - 0.00_02) < 1e-3

    def test_full_loop_no_noise(self):
        """Run the full denoising loop with the default (epsilon) config."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but with timesteps placed on the target device up front."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if str(torch_device).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.41_25) < 1e-2
            assert abs(result_mean.item() - 0.02_66) < 1e-3
| 42
| 0
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__lowerCamelCase :Optional[int] = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def snake_case ( args ) -> None:
    """Validate distillation CLI arguments; every violated invariant raises AssertionError.

    NOTE(review): the original named its parameter `UpperCamelCase__` while the
    body read the undefined name `args`; the parameter is renamed to match.
    """
    # Exactly one of the MLM / CLM objectives must carry weight, matching --mlm.
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
    # Student/teacher families must match (distilbert may distill from bert).
    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)
    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]
    # All loss weights are non-negative and at least one is active.
    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def snake_case ( student , args ) -> None:
    """Freeze the student's learned position embeddings (they track the teacher's).

    NOTE(review): the original declared both parameters as `UpperCamelCase__`
    (a SyntaxError) and dropped the assignment targets; the attribute paths
    below follow the standard roberta/gpt2 module layout — confirm against the
    student model classes used by this script.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def snake_case ( student , args ) -> None:
    """Freeze the student's token-type embeddings (roberta students only).

    NOTE(review): the original declared both parameters as `UpperCamelCase__`
    (a SyntaxError) and dropped the assignment target; the attribute path
    below follows the standard roberta module layout — confirm against the
    student model class used by this script.
    """
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Entry point for the distillation training script.

    Parses the CLI arguments, prepares the tokenizer, the binarized dataset, the
    student and teacher models, applies the requested embedding freezes, and
    launches the Distiller training loop.
    """
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
                    " itUse `--force` if you want to overwrite it"
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        # Rare tokens get larger sampling weights (XLM-style smoothing).
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    # Hidden states are needed for the cosine/MSE distillation losses.
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
| 720
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Fast unit tests for StableDiffusionXLImgaImgPipeline built from tiny, seeded components.

    NOTE(review): the base-class placeholders `__lowercase`, the repeated attribute name
    `snake_case__`, the repeated method name `a__`, and the duplicated parameter name `__a`
    below look like artifacts of an automated rename (duplicate parameter names are a
    SyntaxError in Python, and repeated method names shadow each other) — presumably the
    originals were the PipelineLatentTesterMixin/PipelineTesterMixin bases and descriptive
    names; confirm against the upstream diffusers test file before running.
    """

    # Pipeline class under test and the parameter sets checked by the shared mixins.
    snake_case__ : str =StableDiffusionXLImgaImgPipeline
    snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
    snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS

    def a__ ( self: List[str] )-> int:
        # Build a dict of tiny, deterministically-seeded model components (UNet, scheduler,
        # VAE, two CLIP text encoders + tokenizers) so the pipeline tests run quickly.
        torch.manual_seed(0 )
        lowerCamelCase : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        lowerCamelCase : Any = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        lowerCamelCase : Any = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
        lowerCamelCase : Dict = CLIPTextModel(__a )
        lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
        lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
        # Build deterministic dummy pipeline inputs (image in [0, 1], seeded generator).
        lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : Any = image / 2 + 0.5
        if str(__a ).startswith("""mps""" ):
            # MPS does not support device-specific generators.
            lowerCamelCase : Dict = torch.manual_seed(__a )
        else:
            lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Tuple = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    def a__ ( self: Dict )-> Optional[Any]:
        # Smoke test: a 2-step CPU run must reproduce a known 3x3 output slice.
        lowerCamelCase : Any = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : int = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
        lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def a__ ( self: Optional[int] )-> Union[str, Any]:
        # Delegates to the shared mixin with a loosened tolerance.
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def a__ ( self: Optional[Any] )-> str:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def a__ ( self: List[str] )-> Optional[Any]:
        # Intentionally skipped test from the upstream suite.
        pass

    def a__ ( self: List[Any] )-> Union[str, Any]:
        # Verify that passing pre-computed prompt embeddings yields the same image
        # as passing the raw prompt/negative-prompt strings.
        lowerCamelCase : Tuple = self.get_dummy_components()
        lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : str = sd_pipe.to(__a )
        lowerCamelCase : Any = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        # forward without prompt embeds
        lowerCamelCase : Dict = self.get_dummy_inputs(__a )
        lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Optional[int] = negative_prompt
        lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
        lowerCamelCase : List[Any] = sd_pipe(**__a )
        lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
        lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
        lowerCamelCase : int = sd_pipe(
            **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
        lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    """Slow GPU integration test running a real pretrained Stable Diffusion checkpoint.

    NOTE(review): the repeated method name `a__` looks like a rename artifact — the first
    method calls super().tearDown(), so it was presumably named `tearDown`, and unittest
    will not invoke it under its current name; confirm against the upstream file.
    """

    def a__ ( self: Dict )-> str:
        # Free GPU/CPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
        # Build deterministic pipeline inputs with fixed latents for reproducible output.
        lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
        lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
        lowerCamelCase : int = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def a__ ( self: Optional[int] )-> List[str]:
        # 3-step run of the real checkpoint must match a recorded output slice.
        lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Optional[int] = self.get_inputs(__a )
        lowerCamelCase : Optional[Any] = pipe(**__a ).images
        lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 0
|
"""simple docstring"""
import argparse
import datetime
def zeller(date_input: str) -> str:
    """Return the day of the week for a Gregorian date using Zeller's congruence.

    Args:
        date_input: exactly 10 characters, ``mm-dd-yyyy`` or ``mm/dd/yyyy``.

    Returns:
        A sentence naming the weekday, e.g. ``'Your date 01-31-2010, is a Sunday!'``.

    Raises:
        ValueError: on malformed input or out-of-range fields.
        AssertionError: if the Zeller result disagrees with ``datetime`` (should not happen).
    """
    # Zeller weekday index (Sunday == 0) -> weekday name.
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # datetime.weekday() (Monday == 0) -> Zeller index, used for cross-validation.
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate: the format is fixed-width, so require exactly 10 characters
    # (the previous `0 < len < 11` check accepted shorter strings that the
    # positional parsing below cannot handle).
    if len(date_input) != 10:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError("Year out of range. There has to be some sort of limit...right?")

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math: Zeller treats January/February as months 13/14 of the previous year.
    if m <= 2:
        y = y - 1
        m = m + 12

    # maths var
    c = int(str(y)[:2])  # century part of the year
    k = int(str(y)[2:])  # year within the century
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math against the standard library's calendar.
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            "Find out what day of the week nearly any date is or was. Enter "
            "date as a string in the mm-dd-yyyy or mm/dd/yyyy format"
        )
    )
    parser.add_argument(
        "date_input", type=str, help="Date as a string (mm-dd-yyyy or mm/dd/yyyy)"
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 721
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Dummy module whose forward() takes the ONNX inputs in their canonical, contiguous order.

    ensure_valid_input inspects the *names* of forward's parameters, so they must be the
    real ONNX input names (input_ids, token_type_ids, attention_mask), not placeholders.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Dummy module whose forward() interleaves a non-ONNX parameter (some_other_args)
    between the ONNX inputs, to exercise ensure_valid_input's argument filtering."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """End-to-end tests for transformers.convert_graph_to_onnx.

    The class must be named OnnxExportTestCase: the test methods reference
    OnnxExportTestCase.MODEL_TO_TEST by that name, and unittest only discovers
    uniquely-named test_* methods (the previous duplicate `a__` names shadowed
    each other, leaving a single surviving method).
    """

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Export `model` to ONNX in a scratch directory and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 42
| 0
|
"""simple docstring"""
import string
from math import logaa
def snake_case(term: str, document: str) -> int:
    """Return the term frequency: case-insensitive whole-word occurrences of `term` in `document`.

    Punctuation and newlines are stripped before the document is split on spaces.
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def snake_case(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents).

    `corpus` is a newline-separated collection of documents; matching is
    case-insensitive and substring-based after punctuation is stripped.
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def snake_case(df: int, n: int, smoothing: bool = False) -> float:
    """Return the inverse document frequency, rounded to 3 decimals.

    Args:
        df: number of documents containing the term.
        n: total number of documents.
        smoothing: when True, use the smoothed form ``1 + log10(n / (1 + df))``,
            which is defined even for df == 0.

    Raises:
        ZeroDivisionError: if df == 0 without smoothing.
        ValueError: if n == 0 (log10(0) is undefined).
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + logaa(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(logaa(n / df), 3)
def snake_case(tf: int, idf: int) -> float:
    """Combine a term frequency and an inverse document frequency into a tf-idf score,
    rounded to 3 decimals."""
    return round(tf * idf, 3)
| 700
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__(unittest.TestCase):
    """Unit tests for knapsack.greedy_knapsack.calc_profit.

    Methods need unique test_* names: the previous duplicate `a__` names shadowed
    each other, so unittest would only ever see (and never run) a single method.
    """

    def test_sorted(self):
        """A solvable instance returns the maximal greedy profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        # max_weight = -15 should be rejected.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_profit_value(self):
        # NOTE: the library reports negative *profits* with this weight-worded
        # message; kept verbatim to match the implementation under test.
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        # max_weight = 0 should be rejected as well.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        # Mismatched profit/weight lengths raise IndexError in the library.
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
| 42
| 0
|
"""simple docstring"""
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two bits: 0 only when both inputs are 1, otherwise 1.

    Named nand_gate because the truth-table check and the __main__ demo below
    call it by that name; the parameters must also be distinct (the previous
    duplicated placeholder name was a SyntaxError and consulted only one input).
    """
    return int((input_1, input_2).count(0) != 0)
def snake_case ( ) -> None:
    """Exercise the full NAND truth table.

    NOTE(review): as written, `nand_gate` is not defined anywhere in this file
    (the gate above is bound to a different name) — resolve before running.
    """
    assert nand_gate(0 , 0 ) == 1
    assert nand_gate(0 , 1 ) == 1
    assert nand_gate(1 , 0 ) == 1
    assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
    # Demo: print the NAND truth table for all four input combinations.
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps each submodule name to the public names it provides.
# It must be bound as `_import_structure` — that name is passed to _LazyModule below —
# and the optional-dependency branches must extend it rather than bind throwaway names.
_import_structure = {
    "configuration_owlvit": [
        "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "OwlViTConfig",
        "OwlViTOnnxConfig",
        "OwlViTTextConfig",
        "OwlViTVisionConfig",
    ],
    "processing_owlvit": ["OwlViTProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_owlvit"] = [
        "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OwlViTModel",
        "OwlViTPreTrainedModel",
        "OwlViTTextModel",
        "OwlViTVisionModel",
        "OwlViTForObjectDetection",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_owlvit import (
        OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        OwlViTConfig,
        OwlViTOnnxConfig,
        OwlViTTextConfig,
        OwlViTVisionConfig,
    )
    from .processing_owlvit import OwlViTProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_owlvit import OwlViTFeatureExtractor
        from .image_processing_owlvit import OwlViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_owlvit import (
            OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            OwlViTForObjectDetection,
            OwlViTModel,
            OwlViTPreTrainedModel,
            OwlViTTextModel,
            OwlViTVisionModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def snake_case ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    """Build the keyword-argument dict fed to a MaMaaa (M2M100) model in the tests.

    Any mask not supplied is derived: attention masks are ``True`` wherever the
    ids differ from ``config.pad_token_id``; head masks default to all-ones
    tensors shaped ``(num_layers, num_heads)``.

    Fixes vs. the original:
    - the original ``def`` repeated the parameter name ``UpperCamelCase__``,
      which is a SyntaxError (duplicate argument names);
    - ``"decoder_attention_mask"`` was populated from ``attention_mask``,
      silently discarding the decoder mask computed just above.
    """
    if attention_mask is None:
        lowerCamelCase : Any = input_ids.ne(config.pad_token_id )
        attention_mask = lowerCamelCase
    if decoder_attention_mask is None:
        lowerCamelCase : int = decoder_input_ids.ne(config.pad_token_id )
        decoder_attention_mask = lowerCamelCase
    if head_mask is None:
        # One mask entry per (encoder layer, attention head); all heads active.
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=input_ids.device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=input_ids.device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=input_ids.device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class A__ :
    """Helper that builds a tiny MaMaaa (M2M100) config plus matching
    encoder/decoder inputs and runs shared model checks.

    NOTE(review): an automated rename collapsed every parameter of the
    methods below to ``__a`` (a SyntaxError: duplicate argument names) and
    rebound locals to ``lowerCamelCase`` so later references (``parent``,
    ``batch_size``, ...) are undefined. The code is kept byte-identical here;
    the distinct original names must be restored for the file to import.
    """

    def __init__( self: Dict , __a: Any , __a: Optional[int]=13 , __a: Tuple=7 , __a: Optional[int]=True , __a: Optional[int]=False , __a: List[str]=99 , __a: str=16 , __a: str=2 , __a: List[str]=4 , __a: Union[str, Any]=4 , __a: Dict="relu" , __a: int=0.1 , __a: str=0.1 , __a: Union[str, Any]=0.0 , __a: Optional[Any]=0.0 , __a: List[Any]=20 , __a: str=2 , __a: int=1 , __a: Optional[Any]=0 , )-> Optional[int]:
        # Store the hyper-parameters of the tiny test model.
        lowerCamelCase : Any = parent
        lowerCamelCase : Any = batch_size
        lowerCamelCase : List[str] = seq_length
        lowerCamelCase : Optional[int] = is_training
        lowerCamelCase : str = use_labels
        lowerCamelCase : Tuple = vocab_size
        lowerCamelCase : Optional[Any] = hidden_size
        lowerCamelCase : Tuple = num_hidden_layers
        lowerCamelCase : List[Any] = num_attention_heads
        lowerCamelCase : Tuple = intermediate_size
        lowerCamelCase : str = hidden_act
        lowerCamelCase : Optional[Any] = hidden_dropout_prob
        lowerCamelCase : int = attention_probs_dropout_prob
        lowerCamelCase : Optional[int] = encoder_layerdrop
        lowerCamelCase : Dict = decoder_layerdrop
        lowerCamelCase : List[Any] = max_position_embeddings
        lowerCamelCase : List[str] = eos_token_id
        lowerCamelCase : List[str] = pad_token_id
        lowerCamelCase : Tuple = bos_token_id

    # Build (config, inputs_dict) with pad-free random encoder/decoder ids.
    def a__ ( self: List[Any] )-> Tuple:
        lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : List[str] = self.eos_token_id  # Eos Token
        lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        lowerCamelCase : List[str] = input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase : str = decoder_input_ids.clamp(self.pad_token_id + 1 )
        lowerCamelCase : List[str] = self.get_config()
        lowerCamelCase : Tuple = prepare_mam_aaa_inputs_dict(__a , __a , __a )
        return config, inputs_dict

    # Assemble the tiny MaMaaaConfig from the stored hyper-parameters.
    def a__ ( self: str )-> int:
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    # Thin wrapper used by the common ModelTesterMixin machinery.
    def a__ ( self: List[str] )-> Optional[Any]:
        lowerCamelCase : List[str] = self.prepare_config_and_inputs()
        return config, inputs_dict

    # Check that decoding with cached past_key_values matches full recompute.
    def a__ ( self: Any , __a: Optional[int] , __a: List[Any] )-> List[str]:
        lowerCamelCase : Dict = MaMaaaModel(config=__a ).get_decoder().to(__a ).eval()
        lowerCamelCase : Any = inputs_dict["""input_ids"""]
        lowerCamelCase : Union[str, Any] = inputs_dict["""attention_mask"""]
        lowerCamelCase : Dict = inputs_dict["""head_mask"""]
        # first forward pass
        lowerCamelCase : Dict = model(__a , attention_mask=__a , head_mask=__a , use_cache=__a )
        lowerCamelCase : Dict = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        lowerCamelCase : Optional[int] = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowerCamelCase : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        lowerCamelCase : Tuple = model(__a , attention_mask=__a )["""last_hidden_state"""]
        lowerCamelCase : Optional[Any] = model(__a , attention_mask=__a , past_key_values=__a )[
            """last_hidden_state"""
        ]
        # select random slice
        lowerCamelCase : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowerCamelCase : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowerCamelCase : Any = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-2 ) )

    # Check encoder/decoder can be saved and reloaded standalone with
    # numerically matching outputs.
    def a__ ( self: Union[str, Any] , __a: str , __a: Dict )-> Any:
        lowerCamelCase : Union[str, Any] = MaMaaaModel(config=__a ).to(__a ).eval()
        lowerCamelCase : Any = model(**__a )
        lowerCamelCase : Any = outputs.encoder_last_hidden_state
        lowerCamelCase : Dict = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase : List[Any] = model.get_encoder()
            encoder.save_pretrained(__a )
            lowerCamelCase : Union[str, Any] = MaMaaaEncoder.from_pretrained(__a ).to(__a )
        lowerCamelCase : List[Any] = encoder(inputs_dict["""input_ids"""] , attention_mask=inputs_dict["""attention_mask"""] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCamelCase : Dict = model.get_decoder()
            decoder.save_pretrained(__a )
            lowerCamelCase : Dict = MaMaaaDecoder.from_pretrained(__a ).to(__a )
        lowerCamelCase : int = decoder(
            input_ids=inputs_dict["""decoder_input_ids"""] , attention_mask=inputs_dict["""decoder_attention_mask"""] , encoder_hidden_states=__a , encoder_attention_mask=inputs_dict["""attention_mask"""] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
    """Unit tests for the MaMaaa (M2M100) model, combining the common model,
    generation and pipeline test mixins.

    NOTE(review): all class attributes were mangled to the same name
    ``snake_case__`` (later assignments silently overwrite earlier ones) and
    method parameters were collapsed to ``__a`` (SyntaxError); code is kept
    byte-identical — the original attribute/parameter names must be restored.
    """

    snake_case__ : str =(
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Optional[int] =(MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    snake_case__ : int =(
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : List[Any] =True
    snake_case__ : Optional[Any] =True
    snake_case__ : Dict =False
    snake_case__ : Optional[Any] =False

    # Decide whether a given pipeline test should be skipped for this model.
    def a__ ( self: Tuple , __a: Dict , __a: List[Any] , __a: Union[str, Any] , __a: Any , __a: Optional[Any] )-> Dict:
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    # Set up the model tester and config tester fixtures.
    def a__ ( self: Any )-> List[str]:
        lowerCamelCase : Any = MaMaaaModelTester(self )
        lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__a )

    # Run the shared configuration sanity checks.
    def a__ ( self: str )-> str:
        self.config_tester.run_common_tests()

    # Save/reload round-trip must report no missing keys.
    def a__ ( self: Optional[int] )-> Any:
        lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            lowerCamelCase : Optional[Any] = model_class(__a )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__a )
                lowerCamelCase : str = model_class.from_pretrained(__a , output_loading_info=__a )
            self.assertEqual(info["""missing_keys"""] , [] )

    # Cached decoding must match uncached decoding (delegates to the tester).
    def a__ ( self: Dict )-> Tuple:
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )

    # Standalone encoder/decoder save-load check (delegates to the tester).
    def a__ ( self: Union[str, Any] )-> str:
        lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__a )

    # Forward pass with precomputed inputs_embeds instead of input_ids.
    def a__ ( self: Tuple )-> Any:
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            lowerCamelCase : Tuple = model_class(__a )
            model.to(__a )
            model.eval()
            lowerCamelCase : Tuple = copy.deepcopy(self._prepare_for_class(__a , __a ) )
            if not self.is_encoder_decoder:
                lowerCamelCase : str = inputs["""input_ids"""]
                del inputs["input_ids"]
            else:
                lowerCamelCase : List[Any] = inputs["""input_ids"""]
                lowerCamelCase : int = inputs.get("""decoder_input_ids""" , __a )
                del inputs["input_ids"]
                inputs.pop("""decoder_input_ids""" , __a )
            lowerCamelCase : Dict = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                lowerCamelCase : Any = wte(__a )
            else:
                lowerCamelCase : Optional[int] = wte(__a )
                lowerCamelCase : Tuple = wte(__a )
            with torch.no_grad():
                model(**__a )[0]

    # Generation must run in fp16 on CUDA without crashing.
    def a__ ( self: List[str] )-> List[str]:
        lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        lowerCamelCase : List[str] = input_dict["""input_ids"""]
        lowerCamelCase : int = input_ids.ne(1 ).to(__a )
        lowerCamelCase : Any = MaMaaaForConditionalGeneration(__a ).eval().to(__a )
        if torch_device == "cuda":
            model.half()
        model.generate(__a , attention_mask=__a )
        model.generate(num_beams=4 , do_sample=__a , early_stopping=__a , num_return_sequences=3 )
def snake_case ( UpperCamelCase__ ) -> "torch.Tensor":
    """Convert a (nested) list of token ids to a ``torch.long`` tensor.

    Fix: the original passed the token list itself as ``device=`` (the
    mangled name ``UpperCamelCase__`` was reused for both arguments), which
    always raises a TypeError. The tensor is now created on the default
    device; callers move it with ``.to(device)`` as needed.
    """
    return torch.tensor(UpperCamelCase__ , dtype=torch.long )
__lowerCamelCase :List[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class A__ ( unittest.TestCase):
    """Slow integration tests against the pretrained facebook/m2m100_418M
    checkpoint: hidden-state values, LM-head logits, and fr→en beam search."""

    # Shared tokenizer fixture for the translation test.
    @cached_property
    def a__ ( self: Any )-> str:
        return MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" )

    # Base model forward pass: compare a 3x3 slice of hidden states.
    def a__ ( self: Optional[Any] )-> int:
        lowerCamelCase : Dict = MaMaaaModel.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
        lowerCamelCase : List[Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase : int = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase : Any = prepare_mam_aaa_inputs_dict(model.config , __a , __a )
        with torch.no_grad():
            lowerCamelCase : Optional[Any] = model(**__a )[0]
        lowerCamelCase : List[Any] = torch.Size((1, 11, 1_024) )
        self.assertEqual(output.shape , __a )
        # change to expected output here
        lowerCamelCase : int = torch.tensor(
            [[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=__a )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) )

    # Conditional-generation head: compare a 3x3 slice of the logits.
    def a__ ( self: Optional[Any] )-> Optional[Any]:
        lowerCamelCase : Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
        # change to intended input
        lowerCamelCase : Union[str, Any] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
        lowerCamelCase : Optional[Any] = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
        lowerCamelCase : Any = prepare_mam_aaa_inputs_dict(model.config , __a , __a )
        with torch.no_grad():
            lowerCamelCase : str = model(**__a )[0]
        lowerCamelCase : Dict = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , __a )
        # change to expected output here
        lowerCamelCase : str = torch.tensor(
            [[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=__a )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=__a ) )

    # End-to-end fr→en translation with beam search against reference strings.
    def a__ ( self: str )-> Optional[int]:
        lowerCamelCase : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("""facebook/m2m100_418M""" ).to(__a )
        lowerCamelCase : Any = MaMaaaTokenizer.from_pretrained("""facebook/m2m100_418M""" , src_lang="""fr""" , tgt_lang="""en""" )
        lowerCamelCase : List[str] = [
            """L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
            """Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
            """Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"""
            """ Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"""
            """ l'ampleur de la surveillance américaine sur l'ensemble des communications en France.""",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        lowerCamelCase : int = tokenizer(__a , padding=__a , return_tensors="""pt""" )
        lowerCamelCase : str = model.generate(
            input_ids=dct["""input_ids"""].to(__a ) , attention_mask=dct["""attention_mask"""].to(__a ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("""en""" ) , )
        lowerCamelCase : Tuple = [
            """The NSA case highlights the total absence of intelligence debate""",
            """I think there are two levels of response from the French government.""",
            """When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."""
            """ Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"""
            """ communications in France.""",
        ]
        lowerCamelCase : Optional[int] = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=__a , skip_special_tokens=__a )
        assert generated == expected_en
| 702
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Helper that builds a tiny FocalNet configuration and pixel inputs and
    runs per-architecture checks (base model, backbone, masked-image
    modelling, image classification).

    NOTE(review): method parameters were collapsed to ``__a``
    (SyntaxError: duplicate argument names) and locals rebound to
    ``lowerCamelCase``; code kept byte-identical — restore original names.
    """

    def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict:
        # Tiny-model hyper-parameters shared by every check below.
        lowerCamelCase : Dict = parent
        lowerCamelCase : Optional[Any] = batch_size
        lowerCamelCase : Union[str, Any] = image_size
        lowerCamelCase : Optional[int] = patch_size
        lowerCamelCase : Any = num_channels
        lowerCamelCase : Any = embed_dim
        lowerCamelCase : Dict = hidden_sizes
        lowerCamelCase : List[Any] = depths
        lowerCamelCase : Tuple = num_heads
        lowerCamelCase : List[Any] = window_size
        lowerCamelCase : str = mlp_ratio
        lowerCamelCase : str = qkv_bias
        lowerCamelCase : str = hidden_dropout_prob
        lowerCamelCase : Dict = attention_probs_dropout_prob
        lowerCamelCase : Tuple = drop_path_rate
        lowerCamelCase : Dict = hidden_act
        lowerCamelCase : Tuple = use_absolute_embeddings
        lowerCamelCase : List[str] = patch_norm
        lowerCamelCase : List[str] = layer_norm_eps
        lowerCamelCase : str = initializer_range
        lowerCamelCase : Tuple = is_training
        lowerCamelCase : int = scope
        lowerCamelCase : Union[str, Any] = use_labels
        lowerCamelCase : List[str] = type_sequence_label_size
        lowerCamelCase : str = encoder_stride
        lowerCamelCase : List[str] = out_features
        lowerCamelCase : Optional[int] = out_indices

    # Random pixel_values (and labels if requested) plus a fresh config.
    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCamelCase : str = None
        if self.use_labels:
            lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCamelCase : str = self.get_config()
        return config, pixel_values, labels

    # Assemble the tiny FocalNetConfig from the stored hyper-parameters.
    def a__ ( self: List[Any] )-> Optional[int]:
        return FocalNetConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    # Base model: verify last_hidden_state shape after patch downsampling.
    def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
        lowerCamelCase : Tuple = FocalNetModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Tuple = model(__a )
        lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    # Backbone: feature-map shapes/channels, with and without out_features.
    def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int:
        lowerCamelCase : List[Any] = FocalNetBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Optional[Any] = model(__a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        lowerCamelCase : Dict = None
        lowerCamelCase : Dict = FocalNetBackbone(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Any = model(__a )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    # Masked image modelling: reconstruction shape, incl. greyscale input.
    def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]:
        lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[str] = model(__a )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowerCamelCase : List[str] = 1
        lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase : Tuple = model(__a )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    # Image classification: logits shape, incl. greyscale input.
    def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str:
        lowerCamelCase : Optional[Any] = self.type_sequence_label_size
        lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[str] = model(__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowerCamelCase : int = 1
        lowerCamelCase : List[Any] = FocalNetForImageClassification(__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCamelCase : Optional[Any] = model(__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    # Repackage prepared inputs for the common ModelTesterMixin machinery.
    def a__ ( self: int )-> Optional[int]:
        lowerCamelCase : str = self.prepare_config_and_inputs()
        lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs
        lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Unit tests for FocalNet model classes via the common model/pipeline
    test mixins, plus hidden-state shape checks specific to FocalNet.

    NOTE(review): class attributes were all mangled to ``snake_case__``
    (later assignments overwrite earlier ones) and parameters to ``__a``
    (SyntaxError); code kept byte-identical — restore the original names.
    """

    snake_case__ : List[str] =(
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Optional[int] =(
        {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    snake_case__ : Tuple =False
    snake_case__ : Dict =False
    snake_case__ : Dict =False
    snake_case__ : Tuple =False
    snake_case__ : Optional[int] =False

    # Fixtures: tiny-model tester and config tester.
    def a__ ( self: Union[str, Any] )-> Optional[int]:
        lowerCamelCase : List[str] = FocalNetModelTester(self )
        lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a )

    # Run the common configuration round-trip checks.
    def a__ ( self: List[str] )-> List[str]:
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    # Intentional no-op placeholder (common-properties hook).
    def a__ ( self: List[str] )-> Union[str, Any]:
        return

    # Delegated architecture checks (base model, backbone, MIM, classification).
    def a__ ( self: Tuple )-> Tuple:
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def a__ ( self: List[Any] )-> Dict:
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__a )

    def a__ ( self: List[Any] )-> Tuple:
        lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__a )

    def a__ ( self: List[str] )-> Dict:
        lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__a )

    @unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
    def a__ ( self: Optional[Any] )-> str:
        pass

    @unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
    def a__ ( self: Optional[Any] )-> Dict:
        pass

    # Input embeddings exist; output embeddings are absent or a Linear.
    # The backbone class (last entry) is excluded via the [:-1] slice.
    def a__ ( self: Optional[Any] )-> Dict:
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : Any = model_class(__a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCamelCase : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__a , nn.Linear ) )

    # forward() signature starts with pixel_values.
    def a__ ( self: Tuple )-> Optional[int]:
        lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : int = model_class(__a )
            lowerCamelCase : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase : Any = [*signature.parameters.keys()]
            lowerCamelCase : List[Any] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __a )

    # Shared helper: verify hidden_states / reshaped_hidden_states shapes.
    def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]:
        lowerCamelCase : List[Any] = model_class(__a )
        model.to(__a )
        model.eval()
        with torch.no_grad():
            lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
        lowerCamelCase : List[str] = outputs.hidden_states
        lowerCamelCase : Tuple = getattr(
            self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__a ) , __a )
        # FocalNet has a different seq_length
        lowerCamelCase : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(__a ) , __a )
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape
        lowerCamelCase : Tuple = (
            reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    # Hidden-state shapes at the default image size.
    def a__ ( self: Any )-> Any:
        lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : Union[str, Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : List[str] = True
            self.check_hidden_states_output(__a , __a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : List[Any] = True
            self.check_hidden_states_output(__a , __a , __a , __a )

    # Hidden-state shapes when the image size is not a patch multiple.
    def a__ ( self: str )-> Union[str, Any]:
        lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : List[str] = 3
        lowerCamelCase : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCamelCase : Optional[int] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            lowerCamelCase : str = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : Union[str, Any] = True
            self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) )

    # Loading the first pretrained checkpoint must succeed.
    @slow
    def a__ ( self: Optional[int] )-> List[Any]:
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    # With zero-init config, non-embedding params must init to 0.0 or 1.0.
    def a__ ( self: str )-> Any:
        lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : int = _config_zero_init(__a )
        for model_class in self.all_model_classes:
            lowerCamelCase : int = model_class(config=__a )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase):
    """Slow integration test: classify the COCO cats image with the
    pretrained microsoft/focalnet-tiny checkpoint and pin the logits."""

    # Image processor fixture (None when the vision extra is missing).
    @cached_property
    def a__ ( self: Optional[int] )-> Optional[Any]:
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None

    @slow
    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a )
        lowerCamelCase : Any = self.default_image_processor
        lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
        # forward pass
        with torch.no_grad():
            lowerCamelCase : Any = model(**__a )
        # verify the logits
        lowerCamelCase : Tuple = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , __a )
        lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
        # NOTE(review): assertTrue on a non-boolean second argument — this
        # presumably should be assertEqual(argmax, 281); as written the second
        # value is just the msg. Kept byte-identical; verify intent upstream.
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class A__ ( __lowercase , unittest.TestCase):
    """Backbone tests for FocalNet via the common BackboneTesterMixin.

    NOTE(review): this class appears truncated by the chunking — the setUp
    below only creates the model tester; a config_tester line likely
    followed in the original file.
    """

    snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
    snake_case__ : Optional[int] =FocalNetConfig
    snake_case__ : str =False

    # Fixture: tiny-model tester used by the mixin's checks.
    def a__ ( self: Union[str, Any] )-> Tuple:
        lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 0
|
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Fast tests for the DeepFloyd IF inpainting super-resolution pipeline,
    built on the common pipeline tester and the IF-specific mixin."""

    snake_case__ : Optional[int] =IFInpaintingSuperResolutionPipeline
    snake_case__ : Tuple =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    snake_case__ : Dict =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''})
    snake_case__ : Optional[Any] =PipelineTesterMixin.required_optional_params - {'''latents'''}

    # Dummy super-resolution components from the IF mixin.
    def a__ ( self: Optional[Any] )-> Tuple:
        return self._get_superresolution_dummy_components()

    # Deterministic dummy inputs (16x16 image, 32x32 original/mask).
    def a__ ( self: List[Any] , __a: List[Any] , __a: int=0 )-> List[str]:
        if str(__a ).startswith("""mps""" ):
            # MPS generators must be created without a device argument.
            lowerCamelCase : List[Any] = torch.manual_seed(__a )
        else:
            lowerCamelCase : Dict = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Any = floats_tensor((1, 3, 16, 16) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : List[str] = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
    def a__ ( self: Optional[int] )-> int:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def a__ ( self: Any )-> Tuple:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
    def a__ ( self: List[Any] )-> Any:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def a__ ( self: Optional[Any] )-> Optional[int]:
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def a__ ( self: Optional[Any] )-> int:
        self._test_save_load_local()

    def a__ ( self: Dict )-> List[str]:
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 703
|
"""simple docstring"""
import os
def snake_case ( ) -> int:
    """Return the greatest product of four adjacent grid numbers (Project Euler 11).

    Reads the 20x20 integer grid from ``grid.txt`` located next to this file
    and checks every horizontal, vertical and diagonal run of four values.
    """
    # Original code referenced an undefined name in dirname and never bound
    # its locals; __file__ is the evident intent for locating grid.txt.
    with open(os.path.dirname(__file__ ) + """/grid.txt""" ) as f:
        grid = [[int(x ) for x in line.split()] for line in f]

    maximum = 0
    # Horizontal runs (rightwards).
    for i in range(20 ):
        for j in range(17 ):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            maximum = max(maximum , temp )
    # Vertical runs (downwards).
    for i in range(17 ):
        for j in range(20 ):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            maximum = max(maximum , temp )
    # Diagonal runs (down-right).
    for i in range(17 ):
        for j in range(17 ):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            maximum = max(maximum , temp )
    # Diagonal runs (down-left); j starts at 3 so j-3 stays in range.
    for i in range(17 ):
        for j in range(3 , 20 ):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            maximum = max(maximum , temp )
    return maximum


if __name__ == "__main__":
    # Fixed: the guard previously called a nonexistent name ``solution()``.
    print(snake_case() )
| 42
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :List[str] = logging.get_logger(__name__)
__lowerCamelCase :Optional[Any] = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A__ ( PretrainedConfig):
    """Configuration for VisualBERT models.

    All hyper-parameters mirror BERT's, plus ``visual_embedding_dim`` for the
    projected visual features and the ``bypass_transformer`` /
    ``special_visual_initialize`` flags.  (Restored from obfuscated source:
    every ``__init__`` parameter was named ``__a`` — a SyntaxError — while the
    body read the real names; the names below match those body references and
    the keyword names in the ``super().__init__`` call.)
    """

    model_type = '''visual_bert'''

    def __init__(
        self ,
        vocab_size=30_522 ,
        hidden_size=768 ,
        visual_embedding_dim=512 ,
        num_hidden_layers=12 ,
        num_attention_heads=12 ,
        intermediate_size=3_072 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.1 ,
        attention_probs_dropout_prob=0.1 ,
        max_position_embeddings=512 ,
        type_vocab_size=2 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-12 ,
        bypass_transformer=False ,
        special_visual_initialize=True ,
        pad_token_id=1 ,
        bos_token_id=0 ,
        eos_token_id=2 ,
        **kwargs ,
    ):
        # Token-id defaults are forwarded so the base class records them.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 704
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__lowerCamelCase :Any = False
@skip_mps
class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase):
    """Fast tests for ``StableDiffusionAttendAndExcitePipeline`` on tiny models.

    NOTE(review): machine-mangled source — locals are bound to the literal
    name ``lowerCamelCase`` while later lines read the intended names
    (``unet``, ``scheduler``, ``pipe`` ...), several signatures repeat the
    parameter name ``__a``, and all bases are ``__lowercase`` (presumably the
    pipeline tester mixins imported above — confirm against upstream
    diffusers).  The class cannot run as written; comments describe the
    evident intent only.
    """

    # Presumably pipeline_class / params / batch_params / image params read
    # by PipelineTesterMixin — the attribute names were mangled; TODO confirm.
    snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline
    snake_case__ : Any =False
    snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS
    snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''})
    snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def a__ ( cls: Dict )-> Tuple:
        # setUpClass: toggles torch deterministic algorithms (flag value
        # mangled to ``__a``; presumably True here — TODO confirm).
        super().setUpClass()
        torch.use_deterministic_algorithms(__a )

    @classmethod
    def a__ ( cls: Union[str, Any] )-> Any:
        # tearDownClass counterpart (presumably restores False — TODO confirm).
        super().tearDownClass()
        torch.use_deterministic_algorithms(__a )

    def a__ ( self: Tuple )-> Union[str, Any]:
        # Builds the tiny component dict (unet/scheduler/vae/text encoder/tokenizer)
        # returned to the pipeline under test; each is seeded for determinism.
        torch.manual_seed(0 )
        lowerCamelCase : str = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
        lowerCamelCase : Union[str, Any] = DDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , )
        torch.manual_seed(0 )
        lowerCamelCase : Union[str, Any] = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase : str = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , )
        lowerCamelCase : Optional[int] = CLIPTextModel(__a )
        lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        lowerCamelCase : List[str] = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components

    def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]:
        # get_dummy_inputs(device, seed): one deterministic prompt + the
        # token indices that attend-and-excite should amplify.
        if str(__a ).startswith("""mps""" ):
            lowerCamelCase : Tuple = torch.manual_seed(__a )
        else:
            lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Dict = {
            """prompt""": """a cat and a frog""",
            """token_indices""": [2, 5],
            """generator""": generator,
            """num_inference_steps""": 1,
            """guidance_scale""": 6.0,
            """output_type""": """numpy""",
            """max_iter_to_alter""": 2,
            """thresholds""": {0: 0.7},
        }
        return inputs

    def a__ ( self: Dict )-> str:
        # End-to-end CPU inference: compares a 3x3 corner slice of the output
        # image against hard-coded reference values within 1e-3.
        lowerCamelCase : Tuple = """cpu"""
        lowerCamelCase : List[str] = self.get_dummy_components()
        lowerCamelCase : List[Any] = self.pipeline_class(**__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Any = self.get_dummy_inputs(__a )
        lowerCamelCase : Union[str, Any] = pipe(**__a ).images
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        lowerCamelCase : Optional[Any] = np.array(
            [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
        lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(__a , 1e-3 )

    # The remaining methods relax tolerances / batch sizes of the inherited
    # mixin checks for this pipeline.
    def a__ ( self: int )-> Optional[Any]:
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )

    def a__ ( self: Union[str, Any] )-> Optional[int]:
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )

    def a__ ( self: Tuple )-> int:
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )

    def a__ ( self: Dict )-> List[Any]:
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )

    def a__ ( self: Optional[int] )-> Dict:
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )

    def a__ ( self: Any )-> Tuple:
        super().test_save_load_local(expected_max_difference=5e-4 )

    def a__ ( self: str )-> str:
        super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
    """Slow GPU integration test for attend-and-excite on SD v1.4.

    NOTE(review): same machine-mangling as the class above (locals bound to
    ``lowerCamelCase``, flag values replaced by ``__a``); cannot run as
    written.  Comments describe the evident intent only.
    """

    @classmethod
    def a__ ( cls: Any )-> Tuple:
        # setUpClass: toggles deterministic algorithms (value mangled to __a).
        super().setUpClass()
        torch.use_deterministic_algorithms(__a )

    @classmethod
    def a__ ( cls: Dict )-> Optional[int]:
        super().tearDownClass()
        torch.use_deterministic_algorithms(__a )

    def a__ ( self: int )-> Optional[int]:
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self: int )-> Optional[Any]:
        # Generates one image with fixed seed 51 and compares it against a
        # reference image hosted on the Hub, tolerance 0.5 absolute.
        lowerCamelCase : List[Any] = torch.manual_seed(51 )
        lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa )
        pipe.to("""cuda""" )
        lowerCamelCase : Dict = """a painting of an elephant with glasses"""
        lowerCamelCase : Any = [5, 7]
        lowerCamelCase : Tuple = pipe(
            prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
        lowerCamelCase : Union[str, Any] = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
        assert np.abs((expected_image - image).max() ) < 5e-1
| 42
| 0
|
"""simple docstring"""
def snake_case ( texta: str , textb: str ) -> str:
    """Return the longest common substring of ``texta`` and ``textb``.

    Classic O(len(texta) * len(textb)) dynamic programme: ``dp[i][j]`` is the
    length of the longest common suffix of ``texta[:i]`` and ``textb[:j]``.
    Returns the empty string when there is no common substring; on ties the
    earliest match in ``texta`` wins (strict ``>`` comparison below).

    Raises:
        ValueError: if either argument is not a ``str``.
    """
    if not (isinstance(texta , str ) and isinstance(textb , str )):
        raise ValueError("""longest_common_substring() takes two strings for inputs""" )

    texta_length = len(texta )
    textb_length = len(textb )
    dp = [[0] * (textb_length + 1 ) for _ in range(texta_length + 1 )]
    ans_index = 0  # end position (exclusive) of the best match in texta
    ans_length = 0
    for i in range(1 , texta_length + 1 ):
        for j in range(1 , textb_length + 1 ):
            if texta[i - 1] == textb[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return texta[ans_index - ans_length : ans_index]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 705
|
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class A__ :
    """Builds tiny ESM configs and inputs for the model tests below.

    NOTE(review): machine-mangled source — every local/attribute assignment
    targets the literal name ``lowerCamelCase`` while later lines read the
    intended names, and ``__init__`` repeats the parameter name ``__a``
    (a SyntaxError).  The class cannot run as written; comments describe the
    evident intent only, inferred from the in-block references.
    """

    def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple:
        # Presumed parameter order (from the assignment RHS names below):
        # parent, batch_size=13, seq_length=7, is_training=False,
        # use_input_mask=True, use_token_type_ids=False, use_labels=True,
        # vocab_size=33, hidden_size=32, num_hidden_layers=5,
        # num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        # hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        # max_position_embeddings=512, type_vocab_size=16,
        # type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        # num_choices=4, scope=None — TODO confirm against upstream.
        lowerCamelCase : Union[str, Any] = parent
        lowerCamelCase : Tuple = batch_size
        lowerCamelCase : Any = seq_length
        lowerCamelCase : Any = is_training
        lowerCamelCase : Tuple = use_input_mask
        lowerCamelCase : int = use_token_type_ids
        lowerCamelCase : List[str] = use_labels
        lowerCamelCase : Optional[int] = vocab_size
        lowerCamelCase : Tuple = hidden_size
        lowerCamelCase : List[str] = num_hidden_layers
        lowerCamelCase : Optional[int] = num_attention_heads
        lowerCamelCase : Optional[Any] = intermediate_size
        lowerCamelCase : Optional[Any] = hidden_act
        lowerCamelCase : Union[str, Any] = hidden_dropout_prob
        lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
        lowerCamelCase : Any = max_position_embeddings
        lowerCamelCase : str = type_vocab_size
        lowerCamelCase : List[Any] = type_sequence_label_size
        lowerCamelCase : Optional[Any] = initializer_range
        lowerCamelCase : Union[str, Any] = num_labels
        lowerCamelCase : Optional[Any] = num_choices
        lowerCamelCase : Any = scope

    def a__ ( self: Optional[int] )-> List[Any]:
        # prepare_config_and_inputs: random ids/masks/labels plus a config.
        lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCamelCase : Dict = None
        if self.use_input_mask:
            lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCamelCase : Any = None
        lowerCamelCase : int = None
        lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
        lowerCamelCase : List[str] = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def a__ ( self: Tuple )-> Union[str, Any]:
        # get_config: a tiny EsmConfig mirroring the tester's attributes.
        return EsmConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , __a: str )-> int:
        # create_and_check_model: forward passes with/without the attention
        # mask and checks output shapes.
        lowerCamelCase : Optional[int] = EsmModel(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a , attention_mask=__a )
        lowerCamelCase : str = model(__a )
        lowerCamelCase : Optional[Any] = model(__a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int:
        # create_and_check_for_masked_lm: logits over the full vocabulary.
        lowerCamelCase : str = EsmForMaskedLM(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]:
        # create_and_check_for_token_classification: per-token label logits.
        lowerCamelCase : Tuple = self.num_labels
        lowerCamelCase : Dict = EsmForTokenClassification(config=__a )
        model.to(__a )
        model.eval()
        lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def a__ ( self: Optional[int] )-> Optional[int]:
        # prepare_config_and_inputs_for_common: repackages the tuple above
        # into (config, inputs_dict) for the shared ModelTesterMixin checks.
        lowerCamelCase : Any = self.prepare_config_and_inputs()
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Tuple = config_and_inputs
        lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Shared-mixin tests for the ESM model family.

    NOTE(review): machine-mangled source — locals/attributes are bound to
    ``lowerCamelCase``/``snake_case__``, the bases are ``__lowercase``, and
    ``setUp`` references ``EsmModelTester``, a name not defined anywhere in
    this file under that spelling.  The class cannot run as written; comments
    describe the evident intent only.
    """

    # Presumably the ModelTesterMixin driver attributes (all_model_classes,
    # pipeline_model_mapping, ...) — attribute names mangled; TODO confirm.
    snake_case__ : Any =False
    snake_case__ : Dict =(
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    snake_case__ : Dict =()
    snake_case__ : Optional[int] =(
        {
            '''feature-extraction''': EsmModel,
            '''fill-mask''': EsmForMaskedLM,
            '''text-classification''': EsmForSequenceClassification,
            '''token-classification''': EsmForTokenClassification,
            '''zero-shot''': EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    snake_case__ : Any =True

    def a__ ( self: Optional[int] )-> Optional[int]:
        # setUp: builds the model tester and a ConfigTester for EsmConfig.
        lowerCamelCase : Optional[Any] = EsmModelTester(self )
        lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 )

    def a__ ( self: List[Any] )-> Optional[Any]:
        self.config_tester.run_common_tests()

    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__a )

    def a__ ( self: Tuple )-> Any:
        # Runs the model check once per position-embedding variant.
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # Presumably sets config_and_inputs[0].position_embedding_type —
            # the assignment target was mangled; TODO confirm.
            lowerCamelCase : Tuple = type
            self.model_tester.create_and_check_model(*__a )

    def a__ ( self: List[str] )-> Tuple:
        lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__a )

    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__a )

    @slow
    def a__ ( self: Any )-> List[Any]:
        # Smoke-loads the first pretrained checkpoint from the archive list.
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase : int = EsmModel.from_pretrained(__a )
            self.assertIsNotNone(__a )

    def a__ ( self: str )-> List[str]:
        # Checks create_position_ids_from_input_ids skips the padding index.
        lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a )
        lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        lowerCamelCase : Union[str, Any] = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    def a__ ( self: Optional[int] )-> int:
        # Same check for position ids derived from input embeddings.
        lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0]
        lowerCamelCase : Any = EsmEmbeddings(config=__a )
        lowerCamelCase : Dict = torch.empty(2 , 4 , 30 )
        lowerCamelCase : List[Any] = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] )
        lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(__a , __a ) ) )

    @unittest.skip("""Esm does not support embedding resizing""" )
    def a__ ( self: Any )-> Optional[Any]:
        pass

    @unittest.skip("""Esm does not support embedding resizing""" )
    def a__ ( self: Dict )-> Dict:
        pass

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def a__ ( self: List[str] )-> Dict:
        pass
@require_torch
class A__ ( __lowercase):
    """Slow integration tests against the real facebook/esm2_t6_8M_UR50D
    checkpoint.

    NOTE(review): same machine-mangling as above (locals bound to
    ``lowerCamelCase``, base mangled to ``__lowercase`` — presumably
    ``TestCasePlus`` imported at the top).  Cannot run as written.
    """

    @slow
    def a__ ( self: Any )-> Union[str, Any]:
        # Masked-LM head: checks logits shape (1, 6, 33) and a 3x3 slice
        # against hard-coded reference values, tolerance 1e-4.
        with torch.no_grad():
            lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            lowerCamelCase : Tuple = model(__a )[0]
            lowerCamelCase : Dict = 33
            lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , __a )
            lowerCamelCase : Tuple = torch.tensor(
                [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )

    @slow
    def a__ ( self: Dict )-> str:
        # Base model: checks a 3x3 slice of the last hidden state.
        with torch.no_grad():
            lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            lowerCamelCase : Any = model(__a )[0]
            # compare the actual values for a slice.
            lowerCamelCase : Tuple = torch.tensor(
                [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 42
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase :int = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__lowerCamelCase :Dict = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__lowerCamelCase :Any = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def snake_case ( ) -> dict:
    """Map every byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted up by
    256 so every character in the mapping is visible and the mapping stays
    reversible — this keeps byte-level BPE vocabularies free of
    whitespace/control characters.  Cached because the table is constant.
    """
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            # Non-printable byte: assign the next code point above 255.
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def snake_case ( word ):
    """Return the set of adjacent symbol pairs in ``word``.

    ``word`` is a sequence of symbols (during BPE merging, a tuple of
    variable-length strings); each consecutive pair is collected once.
    The original body read ``word`` while the parameter was named
    ``UpperCamelCase__`` — fixed here.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class A__ ( __lowercase):
    """Byte-level BPE tokenizer (Longformer / GPT-2 style).

    NOTE(review): machine-mangled source — locals are bound to the literal
    name ``lowerCamelCase`` while later lines read the intended names
    (``bpe_merges``, ``word``, ``pairs`` ...), many signatures repeat the
    parameter name ``__a`` (a SyntaxError), every method is named ``a__``
    (later defs shadow earlier ones), and the base is ``__lowercase``
    (presumably ``PreTrainedTokenizer`` imported above).  The class cannot
    run as written; comments describe the evident intent only.
    """

    snake_case__ : Dict =VOCAB_FILES_NAMES
    snake_case__ : Dict =PRETRAINED_VOCAB_FILES_MAP
    snake_case__ : List[str] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    snake_case__ : Optional[Any] =['''input_ids''', '''attention_mask''']

    def __init__( self: Optional[Any] , __a: Optional[Any] , __a: List[Any] , __a: Dict="replace" , __a: Optional[Any]="<s>" , __a: List[Any]="</s>" , __a: List[str]="</s>" , __a: List[str]="<s>" , __a: int="<unk>" , __a: Optional[int]="<pad>" , __a: Tuple="<mask>" , __a: Optional[Any]=False , **__a: Dict , )-> List[str]:
        # Presumed parameters: vocab_file, merges_file, errors="replace",
        # bos/eos/sep/cls/unk/pad/mask tokens, add_prefix_space=False.
        # Each plain-string special token is wrapped as an AddedToken.
        lowerCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token
        lowerCamelCase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token
        lowerCamelCase : Optional[int] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token
        lowerCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token
        lowerCamelCase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token
        lowerCamelCase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCamelCase : Union[str, Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token
        super().__init__(
            errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , )
        # Loads the token->id vocabulary and its inverse.
        with open(__a , encoding="""utf-8""" ) as vocab_handle:
            lowerCamelCase : Optional[int] = json.load(__a )
        lowerCamelCase : int = {v: k for k, v in self.encoder.items()}
        lowerCamelCase : Union[str, Any] = errors  # how to handle errors in decoding
        lowerCamelCase : Any = bytes_to_unicode()
        lowerCamelCase : int = {v: k for k, v in self.byte_encoder.items()}
        # Loads the BPE merge ranks (first line of the merges file is a header).
        with open(__a , encoding="""utf-8""" ) as merges_handle:
            lowerCamelCase : Any = merges_handle.read().split("""\n""" )[1:-1]
        lowerCamelCase : Dict = [tuple(merge.split() ) for merge in bpe_merges]
        lowerCamelCase : Dict = dict(zip(__a , range(len(__a ) ) ) )
        lowerCamelCase : Optional[int] = {}
        lowerCamelCase : Tuple = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCamelCase : Dict = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )

    @property
    def a__ ( self: Union[str, Any] )-> List[Any]:
        # vocab_size
        return len(self.encoder )

    def a__ ( self: int )-> Any:
        # get_vocab: base vocabulary plus any added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def a__ ( self: str , __a: List[Any] )-> Union[str, Any]:
        # bpe(token): iteratively merges the lowest-ranked adjacent pair
        # until no ranked pair remains; results are memoized in self.cache.
        if token in self.cache:
            return self.cache[token]
        lowerCamelCase : Any = tuple(__a )
        lowerCamelCase : Dict = get_pairs(__a )
        if not pairs:
            return token
        while True:
            lowerCamelCase : List[str] = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("""inf""" ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCamelCase : Optional[Any] = bigram
            lowerCamelCase : Union[str, Any] = []
            lowerCamelCase : Optional[Any] = 0
            while i < len(__a ):
                try:
                    lowerCamelCase : Optional[Any] = word.index(__a , __a )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCamelCase : Any = j
                if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second:
                    # Merge the matched pair into a single symbol.
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCamelCase : Optional[int] = tuple(__a )
            lowerCamelCase : List[str] = new_word
            if len(__a ) == 1:
                break
            else:
                lowerCamelCase : Union[str, Any] = get_pairs(__a )
        lowerCamelCase : Union[str, Any] = """ """.join(__a )
        lowerCamelCase : Dict = word
        return word

    def a__ ( self: str , __a: Union[str, Any] )-> Any:
        # _tokenize: regex-split the text, byte-encode each piece, then BPE it.
        lowerCamelCase : int = []
        for token in re.findall(self.pat , __a ):
            lowerCamelCase : str = """""".join(
                self.byte_encoder[b] for b in token.encode("""utf-8""" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(""" """ ) )
        return bpe_tokens

    def a__ ( self: Optional[Any] , __a: List[str] )-> Optional[Any]:
        # _convert_token_to_id (unknown tokens fall back to unk_token's id).
        return self.encoder.get(__a , self.encoder.get(self.unk_token ) )

    def a__ ( self: Union[str, Any] , __a: List[Any] )-> Tuple:
        # _convert_id_to_token
        return self.decoder.get(__a )

    def a__ ( self: Any , __a: List[str] )-> Any:
        # convert_tokens_to_string: undo the byte->unicode mapping.
        lowerCamelCase : Union[str, Any] = """""".join(__a )
        lowerCamelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
        return text

    def a__ ( self: List[Any] , __a: str , __a: Optional[str] = None )-> Tuple[str]:
        # save_vocabulary: writes vocab.json and merges.txt into save_directory.
        if not os.path.isdir(__a ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCamelCase : int = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCamelCase : Optional[int] = os.path.join(
            __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
        with open(__a , """w""" , encoding="""utf-8""" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + """\n""" )
        lowerCamelCase : List[Any] = 0
        with open(__a , """w""" , encoding="""utf-8""" ) as writer:
            writer.write("""#version: 0.2\n""" )
            # NOTE(review): the lambda parameter is ``__a`` but the body reads
            # ``kv`` — evidently meant ``lambda kv: kv[1]`` (sort merges by rank).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        """ Please check that the tokenizer is not corrupted!""" )
                    lowerCamelCase : Tuple = token_index
                writer.write(""" """.join(__a ) + """\n""" )
                index += 1
        return vocab_file, merge_file

    def a__ ( self: List[Any] , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
        # build_inputs_with_special_tokens:
        #   single: <s> X </s>   pair: <s> A </s></s> B </s>
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowerCamelCase : Any = [self.cls_token_id]
        lowerCamelCase : Dict = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def a__ ( self: Any , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]:
        # get_special_tokens_mask: 1 marks a special token position.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
        if token_ids_a is None:
            return [1] + ([0] * len(__a )) + [1]
        return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]

    def a__ ( self: Tuple , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]:
        # create_token_type_ids_from_sequences: RoBERTa-style — all zeros.
        lowerCamelCase : int = [self.sep_token_id]
        lowerCamelCase : List[str] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def a__ ( self: Optional[Any] , __a: Tuple , __a: int=False , **__a: Any )-> Tuple:
        # prepare_for_tokenization: optionally prefix a space so the first
        # word is BPE-encoded the same way as mid-sentence words.
        lowerCamelCase : Optional[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
            lowerCamelCase : str = """ """ + text
        return (text, kwargs)
| 706
|
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase :str = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( __lowercase , unittest.TestCase):
    """Tokenizer test-suite for ALBERT (SentencePiece-based), driven by the shared tokenizer test mixin."""
    # NOTE(review): all five class attributes below carry the same mangled name
    # ("snake_case__"); the mixin normally expects distinct attributes such as
    # tokenizer_class / rust_tokenizer_class / test flags — confirm upstream.
    snake_case__ : List[str] =AlbertTokenizer
    snake_case__ : Optional[Any] =AlbertTokenizerFast
    snake_case__ : Optional[int] =True
    snake_case__ : Any =True
    snake_case__ : Optional[int] =True
    def a__ ( self: Dict )-> Optional[Any]:
        """Set up: build a tokenizer from the SentencePiece fixture and save it to tmpdir."""
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): `__a` is unresolved here (mangled) — presumably the
        # SAMPLE_VOCAB fixture path; `tokenizer` below is likewise unbound.
        lowerCamelCase : int = AlbertTokenizer(__a )
        tokenizer.save_pretrained(self.tmpdirname )
    def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
        """Return an (input_text, output_text) pair for round-trip tests."""
        lowerCamelCase : List[str] = """this is a test"""
        lowerCamelCase : int = """this is a test"""
        # NOTE(review): `input_text` / `output_text` are unbound (mangled locals above).
        return input_text, output_text
    def a__ ( self: Any )-> List[Any]:
        """Check token<->id conversion for the pad token (id 0)."""
        lowerCamelCase : int = """<pad>"""
        lowerCamelCase : Optional[int] = 0
        # NOTE(review): `__a` placeholders below are mangled — originally the
        # token string and its id from the two locals above.
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
    def a__ ( self: Tuple )-> str:
        """Check first/last vocab keys and total vocab size (30k)."""
        lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
        self.assertEqual(len(__a ) , 30_000 )
    def a__ ( self: List[str] )-> Any:
        """The fixture tokenizer reports a vocab size of 30,000."""
        self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
    def a__ ( self: Optional[Any] )-> Union[str, Any]:
        """Python and Rust tokenizers must agree on tokenize() and encode()."""
        if not self.test_rust_tokenizer:
            return
        lowerCamelCase : str = self.get_tokenizer()
        lowerCamelCase : Tuple = self.get_rust_tokenizer()
        lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé."""
        # Compare token streams without special tokens.
        lowerCamelCase : List[str] = tokenizer.tokenize(__a )
        lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a )
        self.assertListEqual(__a , __a )
        lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
        lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a )
        self.assertListEqual(__a , __a )
        # And with default special-token handling.
        lowerCamelCase : Any = self.get_rust_tokenizer()
        lowerCamelCase : List[str] = tokenizer.encode(__a )
        lowerCamelCase : str = rust_tokenizer.encode(__a )
        self.assertListEqual(__a , __a )
    def a__ ( self: Tuple )-> List[Any]:
        """Pin exact SentencePiece tokenization and id mapping (accents kept)."""
        lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a )
        lowerCamelCase : int = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] )
        lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a )
        self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
        # Round-trip ids back to tokens: out-of-vocab "é" becomes <unk> (id 1).
        lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a )
        self.assertListEqual(
            __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
    def a__ ( self: Tuple )-> str:
        """build_inputs_with_special_tokens wraps single/pair sequences with [CLS]/[SEP]."""
        lowerCamelCase : str = AlbertTokenizer(__a )
        lowerCamelCase : Union[str, Any] = tokenizer.encode("""sequence builders""" )
        lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" )
        lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a )
        lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a )
        # NOTE(review): `text` / `text_a` below are unbound (mangled locals above).
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
    @slow
    def a__ ( self: Any )-> Dict:
        """Integration test: pin the full encoding (ids/type-ids/mask) against albert-base-v2."""
        # fmt: off
        lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
| 42
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
    """Signature-only stub: a model whose forward-style method takes exactly the
    contiguous arguments the tokenizer produces, in canonical order.

    Only the parameter names/order matter (they are inspected by
    `ensure_valid_input`); the body intentionally returns None.
    """

    def a__ ( self: Optional[int] , input_ids: Optional[int] , token_type_ids: Tuple , attention_mask: Optional[int] )-> List[str]:
        # Fix: the original declared all three parameters as `__a` (SyntaxError);
        # names restored from the ensure_valid_input assertions below.
        return None
class A__ :
    """Signature-only stub with a NON-contiguous signature: a parameter the
    tokenizer does not produce ("some_other_args") sits between the produced
    ones, so argument ordering logic must stop before it.
    """

    def a__ ( self: Optional[int] , input_ids: Tuple , some_other_args: str , token_type_ids: str , attention_mask: str )-> Tuple:
        # Fix: the original declared all four parameters as `__a` (SyntaxError);
        # names restored from the ensure_valid_input assertions below.
        return None
class A__ ( unittest.TestCase):
    """Tests for transformers.convert_graph_to_onnx: export, quantization,
    dynamic-axis inference, and input-ordering helpers."""
    # NOTE(review): referenced below as OnnxExportTestCase.MODEL_TO_TEST —
    # mangled attribute name, confirm against the original file.
    snake_case__ : Optional[Any] =[
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
    ]
    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> int:
        # Export every test model through the TensorFlow path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """tf""" , 12 , **__a )
    @require_torch
    @slow
    def a__ ( self: str )-> int:
        # Export every test model through the PyTorch path (opset 12).
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """pt""" , 12 , **__a )
    @require_torch
    @slow
    def a__ ( self: Union[str, Any] )-> Dict:
        """Export a model built from a custom (tiny) vocabulary file."""
        from transformers import BertModel
        lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__a ) )
            vocab_file.flush()
            lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
            model.save_pretrained(__a )
            self._test_export(__a , """pt""" , 12 , __a )
    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> Optional[int]:
        """Quantize TF exports and check the file did not grow."""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
            lowerCamelCase : Tuple = quantize(Path(__a ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    @require_torch
    @slow
    def a__ ( self: Any )-> Optional[int]:
        """Quantize PyTorch exports and check the file did not grow."""
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
            lowerCamelCase : Dict = quantize(__a )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )
    def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
        """Helper: run `convert` into a temp dir and return the output path."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__a , __a , __a , __a , __a , **__a )
            return path
        except Exception as e:
            self.fail(__a )
    @require_torch
    @require_tokenizers
    @slow
    def a__ ( self: Tuple )-> Dict:
        """Dynamic-axis inference on a tiny random BERT (PyTorch)."""
        from transformers import BertModel
        lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """pt""" )
    @require_tf
    @require_tokenizers
    @slow
    def a__ ( self: Optional[Any] )-> List[Any]:
        """Dynamic-axis inference on a tiny random BERT (TensorFlow)."""
        from transformers import TFBertModel
        lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """tf""" )
    def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
        """Helper: infer_shapes must mark batch/sequence axes as dynamic."""
        lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
        lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        lowerCamelCase : Tuple = infer_shapes(__a , __a )
        # Assert all variables are present
        self.assertEqual(len(__a ) , len(__a ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , __a )
        self.assertSequenceEqual(variable_names[3:] , __a )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
    def a__ ( self: List[Any] )-> int:
        """ensure_valid_input must reorder args to match the model signature and
        drop everything after a parameter the tokenizer cannot supply."""
        lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__a ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(__a ) , set(__a ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__a ) , 1 )
        self.assertEqual(len(__a ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )
    def a__ ( self: Tuple )-> Tuple:
        """generate_identified_filename appends the suffix before the extension."""
        lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 707
|
"""Kosaraju's algorithm for strongly connected components of a directed graph."""
# Sample directed graphs as adjacency lists (vertex -> list of successors).
__lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
# NOTE(review): this rebinds the same mangled name as the line above, shadowing it.
__lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first post-order ("finish time") traversal starting at ``vert``.

    Marks reached vertices in ``visited`` in place and returns them in the
    order they finish (the start vertex last).

    Args:
        graph: adjacency lists, vertex -> successors.
        vert: start vertex.
        visited: mutable per-vertex visited flags, updated in place.
    """
    # Fix: the original declared all three parameters with the same name
    # (SyntaxError) and the recursive call referenced the undefined name
    # `topology_sort`; the function name and body are restored accordingly.
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from ``vert`` in the reversed graph.

    Used as the second DFS pass of Kosaraju's algorithm; the returned list is
    one strongly connected component. ``visited`` is updated in place.
    """
    # Fix: the original declared duplicate parameter names (SyntaxError) and
    # the recursive call referenced the undefined name `find_components`.
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def snake_case(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: return the strongly connected components of ``graph``.

    Vertices are assumed to be 0..len(graph)-1. Two DFS passes are used:
    one on the original graph to compute finish order, one on the transpose
    in reverse finish order to peel off components.
    """
    visited = len(graph) * [False]
    # Build the transpose graph (every edge reversed).
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    # First pass: DFS on the original graph to get finish-time order.
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    # Second pass: DFS on the transpose in decreasing finish time.
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 42
| 0
|
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class A__ ( __lowercase):
"""simple docstring"""
def __init__( self: List[Any] , __a: Distribution , __a: List[Any]=None , __a: Optional[int]=None , __a: Union[str, Any]=0 )-> Any:
lowerCamelCase : Any = 1.0 if scale is None else scale
lowerCamelCase : int = 0.0 if loc is None else loc
super().__init__(__a , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=__a )] )
@property
def a__ ( self: Tuple )-> Union[str, Any]:
return self.base_dist.mean * self.scale + self.loc
@property
def a__ ( self: Dict )-> List[str]:
return self.base_dist.variance * self.scale**2
@property
def a__ ( self: List[str] )-> int:
return self.variance.sqrt()
class A__ ( nn.Module):
"""simple docstring"""
def __init__( self: Optional[int] , __a: int , __a: Dict[str, int] , __a: Callable[..., Tuple[torch.Tensor]] , **__a: Optional[Any] )-> None:
super().__init__(**__a )
lowerCamelCase : Optional[int] = args_dim
lowerCamelCase : List[str] = nn.ModuleList([nn.Linear(__a , __a ) for dim in args_dim.values()] )
lowerCamelCase : str = domain_map
def a__ ( self: List[str] , __a: torch.Tensor )-> Tuple[torch.Tensor]:
lowerCamelCase : List[Any] = [proj(__a ) for proj in self.proj]
return self.domain_map(*__a )
class A__ ( nn.Module):
    """Wraps a plain callable as an nn.Module so it can live inside a module
    graph (e.g. as the domain map of a parameter projection)."""

    def __init__( self: Union[str, Any] , function )-> None:
        # Fix: original bound a throwaway local instead of self.function, and
        # the body referenced the (then-undefined) name `function`.
        super().__init__()
        self.function = function

    def forward( self: str , x , *args )-> Any:
        # Fix: renamed from the mangled `a__` so nn.Module.__call__ dispatches here.
        return self.function(x , *args )
class A__ :
    """Base class mapping raw network outputs to a torch Distribution.

    Subclasses declare per-parameter dimensions (args_dim), the distribution
    class, and a domain map constraining raw projections to valid ranges.
    """
    # NOTE(review): the three annotations below all carry the same mangled
    # name ("snake_case__"); the methods read self.distribution_class and
    # self.args_dim, so these were presumably distinct class attributes.
    snake_case__ : type
    snake_case__ : int
    snake_case__ : Dict[str, int]
    def __init__( self: Dict , __a: int = 1 )-> None:
        # NOTE(review): assignments bind throwaway locals although later code
        # reads self.dim / self.args_dim, and `dim` is unbound (param mangled
        # to `__a`) — looks machine-mangled; confirm against the original.
        lowerCamelCase : List[Any] = dim
        lowerCamelCase : List[Any] = {k: dim * self.args_dim[k] for k in self.args_dim}
    def a__ ( self: List[str] , __a: Dict )-> List[Any]:
        # Build the base distribution; wrap in Independent when multivariate.
        if self.dim == 1:
            return self.distribution_class(*__a )
        else:
            return Independent(self.distribution_class(*__a ) , 1 )
    def a__ ( self: Any , __a: str , __a: Optional[torch.Tensor] = None , __a: Optional[torch.Tensor] = None , )-> Distribution:
        # Return the distribution, affinely rescaled by loc/scale when given.
        lowerCamelCase : Union[str, Any] = self._base_distribution(__a )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(__a , loc=__a , scale=__a , event_dim=self.event_dim )
    @property
    def a__ ( self: str )-> Tuple:
        # Event shape: scalar () for dim == 1, else (dim,).
        return () if self.dim == 1 else (self.dim,)
    @property
    def a__ ( self: Union[str, Any] )-> int:
        # Number of event dimensions.
        return len(self.event_shape )
    @property
    def a__ ( self: Dict )-> float:
        # A value guaranteed to lie in the distribution's support.
        return 0.0
    def a__ ( self: Union[str, Any] , __a: int )-> nn.Module:
        # Build the projection module mapping features -> raw parameters.
        return ParameterProjection(
            in_features=__a , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def a__ ( self: Any , *__a: torch.Tensor )-> str:
        # Subclasses constrain raw parameters to their valid domains here.
        raise NotImplementedError()
    @staticmethod
    def a__ ( __a: torch.Tensor )-> torch.Tensor:
        # Smooth positivity map: squareplus(x) = (x + sqrt(x^2 + 4)) / 2 > 0.
        return (x + torch.sqrt(torch.square(__a ) + 4.0 )) / 2.0
class A__ ( __lowercase):
    """Student-t output head: parameters df, loc, scale (1 dim each)."""
    snake_case__ : Dict[str, int] ={"df": 1, "loc": 1, "scale": 1}
    snake_case__ : type =StudentT
    @classmethod
    def a__ ( cls: Optional[Any] , __a: torch.Tensor , __a: torch.Tensor , __a: torch.Tensor )-> Tuple:
        # Constrain scale > 0 (squareplus, clamped above float eps) and df > 2
        # so the variance is finite; squeeze the trailing parameter axis.
        # NOTE(review): parameters are mangled to duplicate `__a` names.
        lowerCamelCase : Optional[Any] = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
        lowerCamelCase : Optional[int] = 2.0 + cls.squareplus(__a )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( __lowercase):
    """Normal output head: parameters loc, scale (1 dim each)."""
    snake_case__ : Dict[str, int] ={"loc": 1, "scale": 1}
    snake_case__ : type =Normal
    @classmethod
    def a__ ( cls: Optional[int] , __a: torch.Tensor , __a: torch.Tensor )-> str:
        # Constrain scale > 0 and squeeze the trailing parameter axis.
        lowerCamelCase : int = cls.squareplus(__a ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class A__ ( __lowercase):
    """Negative-binomial output head: parameters total_count, logits."""
    snake_case__ : Dict[str, int] ={"total_count": 1, "logits": 1}
    snake_case__ : type =NegativeBinomial
    @classmethod
    def a__ ( cls: Tuple , __a: torch.Tensor , __a: torch.Tensor )-> Union[str, Any]:
        # Constrain total_count > 0; logits are unconstrained.
        lowerCamelCase : int = cls.squareplus(__a )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def a__ ( self: List[Any] , __a: Union[str, Any] )-> Distribution:
        # Unpack (total_count, logits) and build the base distribution.
        lowerCamelCase : Optional[int] = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=__a , logits=__a )
        else:
            return Independent(self.distribution_class(total_count=__a , logits=__a ) , 1 )
    def a__ ( self: str , __a: Any , __a: Optional[torch.Tensor] = None , __a: Optional[torch.Tensor] = None )-> Distribution:
        # Overrides the affine rescaling: for count data only `scale` applies,
        # folded into the logits (scaling property of the Gamma mixture).
        lowerCamelCase : List[str] = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
| 708
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
__lowerCamelCase :str = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL (pretrained config archive map).
# NOTE(review): rebinds the same mangled name as the logger above.
__lowerCamelCase :Any = {
    'huggingface/time-series-transformer-tourism-monthly': (
        'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class A__ ( __lowercase):
    """Configuration for the Time Series Transformer model.

    Stores time-series-specific options (prediction/context lengths, lags,
    feature cardinalities, scaling) plus the encoder/decoder Transformer
    hyper-parameters.
    """
    snake_case__ : List[Any] ='''time_series_transformer'''
    # Map common attribute names onto this config's field names.
    snake_case__ : List[Any] ={
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
        '''num_hidden_layers''': '''encoder_layers''',
    }
    # NOTE(review): every parameter below is mangled to `__a` (duplicate names
    # are a SyntaxError) while the body reads the original names
    # (prediction_length, context_length, ...); confirm against upstream.
    def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any:
        # time series specific configuration
        lowerCamelCase : str = prediction_length
        # Context length defaults to the prediction length when not given.
        lowerCamelCase : Optional[Any] = context_length or prediction_length
        lowerCamelCase : Tuple = distribution_output
        lowerCamelCase : Any = loss
        lowerCamelCase : List[Any] = input_size
        lowerCamelCase : int = num_time_features
        lowerCamelCase : Dict = lags_sequence
        lowerCamelCase : Optional[int] = scaling
        lowerCamelCase : int = num_dynamic_real_features
        lowerCamelCase : Tuple = num_static_real_features
        lowerCamelCase : Any = num_static_categorical_features
        # Validate per-category cardinalities when categorical features exist.
        if cardinality and num_static_categorical_features > 0:
            if len(__a ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            lowerCamelCase : int = cardinality
        else:
            lowerCamelCase : Dict = [0]
        # Embedding dims: validate if given, else derive min(50, (cat+1)//2).
        if embedding_dimension and num_static_categorical_features > 0:
            if len(__a ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            lowerCamelCase : str = embedding_dimension
        else:
            lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowerCamelCase : Any = num_parallel_samples
        # Transformer architecture configuration
        # Model input width = per-lag input size plus the static/time features.
        lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features
        lowerCamelCase : List[str] = d_model
        lowerCamelCase : Tuple = encoder_attention_heads
        lowerCamelCase : Optional[int] = decoder_attention_heads
        lowerCamelCase : Union[str, Any] = encoder_ffn_dim
        lowerCamelCase : str = decoder_ffn_dim
        lowerCamelCase : str = encoder_layers
        lowerCamelCase : Any = decoder_layers
        lowerCamelCase : Optional[int] = dropout
        lowerCamelCase : List[str] = attention_dropout
        lowerCamelCase : Tuple = activation_dropout
        lowerCamelCase : Optional[int] = encoder_layerdrop
        lowerCamelCase : int = decoder_layerdrop
        lowerCamelCase : Optional[int] = activation_function
        lowerCamelCase : Optional[Any] = init_std
        lowerCamelCase : Optional[Any] = use_cache
        super().__init__(is_encoder_decoder=__a , **__a )
    @property
    def a__ ( self: int )-> int:
        # Total number of extra input features concatenated to each lag value
        # (referenced above as self._number_of_features — name mangled here).
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
| 42
| 0
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
    """Unit tests for the greedy knapsack helper (imported above as `kp`)."""
    def a__ ( self: Optional[int] )-> Union[str, Any]:
        """Profit for the sample items under a weight limit of 100 is 210."""
        lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
        lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
        lowerCamelCase : Union[str, Any] = 100
        self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
    # NOTE(review): `__a` in the assertRaisesRegex calls below is unresolved —
    # originally the expected exception type (e.g. ValueError/IndexError).
    def a__ ( self: str )-> str:
        self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
    def a__ ( self: str )-> List[Any]:
        self.assertRaisesRegex(__a , """Weight can not be negative.""" )
    def a__ ( self: Any )-> Dict:
        self.assertRaisesRegex(__a , """Profit can not be negative.""" )
    def a__ ( self: Optional[Any] )-> List[Any]:
        self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
    def a__ ( self: Optional[Any] )-> Tuple:
        self.assertRaisesRegex(
            __a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
    unittest.main()
| 709
|
"""simple docstring"""
from __future__ import annotations
__lowerCamelCase :int = 10
def snake_case(list_of_ints: list[int], radix: int = 10) -> list[int]:
    """Sort non-negative integers in place with LSD radix sort and return the list.

    Args:
        list_of_ints: numbers to sort (modified in place and also returned).
        radix: base used for digit bucketing (generalizes the original
            hard-coded base 10; default preserves old behavior).

    Returns:
        The same list, sorted ascending.
    """
    # Fix: the original referenced the undefined name RADIX (the module
    # constant was mangled) and called range() on the list parameter.
    if not list_of_ints:
        # Empty input: nothing to do, and max() below would raise.
        return list_of_ints
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per digit value
        buckets: list[list] = [[] for _ in range(radix)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % radix)
            buckets[tmp].append(i)
        # put each buckets' contents back into list_of_ints, in digit order
        a = 0
        for b in range(radix):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= radix
    return list_of_ints
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| 42
| 0
|
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class A__ :
    """CLIP preprocessing that keeps image ops differentiable: tokenizes text
    with CLIPTokenizerFast and resizes/crops/normalizes image tensors with
    torchvision transforms (CLIP's mean/std)."""
    def __init__( self: Optional[int] , __a: str = "cpu" , __a: str = "openai/clip-vit-large-patch14" )-> None:
        # NOTE(review): assignments bind throwaway locals ("lowerCamelCase")
        # although methods read self.device / self.tokenizer / self.resize /
        # self.center_crop / self.normalize — looks machine-mangled.
        lowerCamelCase : List[str] = device
        lowerCamelCase : Tuple = CLIPTokenizerFast.from_pretrained(__a )
        # CLIP's per-channel image mean and std.
        lowerCamelCase : Dict = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
        lowerCamelCase : Dict = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
        lowerCamelCase : Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std )
        lowerCamelCase : List[Any] = torchvision.transforms.Resize(224 )
        lowerCamelCase : Dict = torchvision.transforms.CenterCrop(224 )
    def a__ ( self: Tuple , __a: str )-> List[str]:
        # Resize -> center-crop -> normalize (all differentiable tensor ops).
        lowerCamelCase : List[str] = self.resize(__a )
        lowerCamelCase : str = self.center_crop(__a )
        lowerCamelCase : int = self.normalize(__a )
        return images
    def __call__( self: int , __a: Optional[Any]=None , __a: Dict=None , **__a: int )-> Tuple:
        # Tokenize text, preprocess images, and move everything to the device.
        lowerCamelCase : str = self.tokenizer(text=__a , **__a )
        lowerCamelCase : Optional[int] = self.preprocess_img(__a )
        lowerCamelCase : str = {key: value.to(self.device ) for (key, value) in encoding.items()}
        return encoding
class A__ ( nn.Module):
    """VQGAN+CLIP image editor: optimizes a latent offset so the VQGAN
    decoding matches positive text prompts and avoids negative ones."""
    def __init__( self: List[str] , __a: Optional[Any]=10 , __a: Dict=0.01 , __a: Optional[Any]=None , __a: List[Any]=None , __a: Any=None , __a: int=None , __a: Tuple=None , __a: int=None , __a: Union[str, Any]=False , __a: str=True , __a: List[str]="image" , __a: Any=True , __a: Dict=False , __a: str=False , __a: Any=False , )-> None:
        # NOTE(review): locals here ("lowerCamelCase") shadow what were
        # presumably self.* attributes read by later methods — mangled.
        super().__init__()
        lowerCamelCase : Any = None
        lowerCamelCase : Optional[int] = device if device else get_device()
        # Use the caller-supplied VQGAN when given, otherwise load from disk.
        if vqgan:
            lowerCamelCase : List[Any] = vqgan
        else:
            lowerCamelCase : Union[str, Any] = load_vqgan(self.device , conf_path=__a , ckpt_path=__a )
        self.vqgan.eval()
        # Same for CLIP: injected instance or the default base-patch32 model.
        if clip:
            lowerCamelCase : str = clip
        else:
            lowerCamelCase : Tuple = CLIPModel.from_pretrained("""openai/clip-vit-base-patch32""" )
        self.clip.to(self.device )
        lowerCamelCase : Any = ProcessorGradientFlow(device=self.device )
        # Optimization hyper-parameters and output options.
        lowerCamelCase : List[Any] = iterations
        lowerCamelCase : str = lr
        lowerCamelCase : Optional[int] = log
        lowerCamelCase : List[Any] = make_grid
        lowerCamelCase : Optional[int] = return_val
        lowerCamelCase : Optional[int] = quantize
        lowerCamelCase : List[Any] = self.vqgan.decoder.z_shape
    def a__ ( self: int , __a: Optional[Any]=None , __a: List[Any]=None , __a: List[Any]=5 , __a: List[str]=True )-> List[Any]:
        """Assemble saved intermediate PNGs into an animated GIF."""
        lowerCamelCase : Optional[int] = []
        if output_path is None:
            lowerCamelCase : Optional[int] = """./animation.gif"""
        if input_path is None:
            lowerCamelCase : List[str] = self.save_path
        lowerCamelCase : str = sorted(glob(input_path + """/*""" ) )
        if not len(__a ):
            raise ValueError(
                """No images found in save path, aborting (did you pass save_intermediate=True to the generate"""
                """ function?)""" )
        if len(__a ) == 1:
            print("""Only one image found in save path, (did you pass save_intermediate=True to the generate function?)""" )
        # Spread the total duration evenly across frames.
        lowerCamelCase : List[Any] = total_duration / len(__a )
        lowerCamelCase : int = [frame_duration] * len(__a )
        if extend_frames:
            # Hold the first frame longer and the last frame longest.
            lowerCamelCase : Tuple = 1.5
            lowerCamelCase : str = 3
        for file_name in paths:
            if file_name.endswith(""".png""" ):
                images.append(imageio.imread(__a ) )
        imageio.mimsave(__a , __a , duration=__a )
        print(f'gif saved to {output_path}' )
    def a__ ( self: Optional[Any] , __a: Tuple=None , __a: Any=None )-> Optional[int]:
        """Encode an image (from disk path) into the VQGAN latent space."""
        if not (path or img):
            raise ValueError("""Input either path or tensor""" )
        if img is not None:
            # Direct-tensor input is not supported yet.
            raise NotImplementedError
        lowerCamelCase : Tuple = preprocess(Image.open(__a ) , target_image_size=256 ).to(self.device )
        lowerCamelCase : Dict = preprocess_vqgan(__a )
        lowerCamelCase : List[str] = self.vqgan.encode(__a )
        return z
    def a__ ( self: Optional[Any] , __a: Dict )-> Dict:
        """Add a transform vector to the stored latent and decode to an image."""
        lowerCamelCase : List[str] = self.latent.detach().requires_grad_()
        lowerCamelCase : int = base_latent + transform_vector
        if self.quantize:
            # Snap the shifted latent back onto the VQGAN codebook first.
            lowerCamelCase : Union[str, Any] = self.vqgan.quantize(__a )
        else:
            lowerCamelCase : Optional[Any] = trans_latent
        return self.vqgan.decode(__a )
    def a__ ( self: str , __a: Optional[int] , __a: List[str] , __a: List[Any]=None )-> Optional[Any]:
        """Score image/text similarity with CLIP, optionally weighted per prompt."""
        lowerCamelCase : Optional[int] = self.clip_preprocessor(text=__a , images=__a , return_tensors="""pt""" , padding=__a )
        lowerCamelCase : Optional[int] = self.clip(**__a )
        lowerCamelCase : Optional[int] = clip_outputs.logits_per_image
        if weights is not None:
            lowerCamelCase : List[str] = similarity_logits * weights
        return similarity_logits.sum()
    def a__ ( self: Any , __a: Tuple , __a: Tuple , __a: int )-> str:
        """Loss = -log(pos similarity) + log(neg similarity); neg defaults to 1."""
        # NOTE(review): positive prompts are weighted by the reciprocal of
        # their weights here (1 / weights) — confirm that is intentional.
        lowerCamelCase : List[Any] = self._get_clip_similarity(pos_prompts["""prompts"""] , __a , weights=(1 / pos_prompts["""weights"""]) )
        if neg_prompts:
            lowerCamelCase : Optional[Any] = self._get_clip_similarity(neg_prompts["""prompts"""] , __a , weights=neg_prompts["""weights"""] )
        else:
            lowerCamelCase : int = torch.tensor([1] , device=self.device )
        lowerCamelCase : Dict = -torch.log(__a ) + torch.log(__a )
        return loss
    def a__ ( self: Optional[int] , __a: List[str] , __a: Optional[int] , __a: Dict )-> int:
        """Generator: optimize a latent offset with Adam against the CLIP loss,
        yielding an intermediate image (or the raw vector) per iteration."""
        lowerCamelCase : Tuple = torch.randn_like(self.latent , requires_grad=__a , device=self.device )
        lowerCamelCase : int = torch.optim.Adam([vector] , lr=self.lr )
        for i in range(self.iterations ):
            optim.zero_grad()
            lowerCamelCase : Optional[Any] = self._add_vector(__a )
            lowerCamelCase : Optional[Any] = loop_post_process(__a )
            lowerCamelCase : Dict = self._get_CLIP_loss(__a , __a , __a )
            print("""CLIP loss""" , __a )
            if self.log:
                wandb.log({"""CLIP Loss""": clip_loss} )
            clip_loss.backward(retain_graph=__a )
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0] )
            else:
                yield vector
def a__ ( self: Optional[Any] , __a: Optional[int] , __a: Optional[int] , __a: Tuple )-> List[Any]:
wandb.init(reinit=__a , project="""face-editor""" )
wandb.config.update({"""Positive Prompts""": positive_prompts} )
wandb.config.update({"""Negative Prompts""": negative_prompts} )
wandb.config.update({"""lr""": self.lr, """iterations""": self.iterations} )
if image_path:
lowerCamelCase : Dict = Image.open(__a )
lowerCamelCase : Optional[Any] = image.resize((256, 256) )
wandb.log("""Original Image""" , wandb.Image(__a ) )
def a__ ( self: str , __a: Union[str, Any] )-> Dict:
if not prompts:
return []
lowerCamelCase : Dict = []
lowerCamelCase : str = []
if isinstance(__a , __a ):
lowerCamelCase : int = [prompt.strip() for prompt in prompts.split("""|""" )]
for prompt in prompts:
if isinstance(__a , (tuple, list) ):
lowerCamelCase : str = prompt[0]
lowerCamelCase : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
lowerCamelCase : str = prompt.split(""":""" )
lowerCamelCase : int = float(__a )
else:
lowerCamelCase : Union[str, Any] = prompt
lowerCamelCase : Optional[int] = 1.0
processed_prompts.append(__a )
weights.append(__a )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__a , device=self.device ),
}
def a__ ( self: List[Any] , __a: int , __a: Any=None , __a: Tuple=None , __a: Tuple=True , __a: Any=False , __a: Any=True , __a: List[str]=True , __a: int=None , )-> Union[str, Any]:
if image_path:
lowerCamelCase : str = self._get_latent(__a )
else:
lowerCamelCase : List[str] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__a , __a , __a )
assert pos_prompts, "You must provide at least one positive prompt."
lowerCamelCase : int = self.process_prompts(__a )
lowerCamelCase : int = self.process_prompts(__a )
if save_final and save_path is None:
lowerCamelCase : str = os.path.join("""./outputs/""" , """_""".join(pos_prompts["""prompts"""] ) )
if not os.path.exists(__a ):
os.makedirs(__a )
else:
lowerCamelCase : Dict = save_path + """_""" + get_timestamp()
os.makedirs(__a )
lowerCamelCase : Tuple = save_path
lowerCamelCase : str = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("""Original Image""" )
show_pil(custom_to_pil(__a ) )
lowerCamelCase : Optional[int] = loop_post_process(__a )
for iter, transformed_img in enumerate(self._optimize_CLIP(__a , __a , __a ) ):
if show_intermediate:
show_pil(__a )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({"""Image""": wandb.Image(__a )} )
if show_final:
show_pil(__a )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
| 710
|
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case(torch_layer, weight, bias=None):
    """Set ``torch_layer``'s weight (and optional bias) to the given tensors.

    The shapes must already match the layer's parameters. The original
    signature declared three parameters all named ``UpperCamelCase__`` (a
    SyntaxError); names restored from the body's assert messages.
    """
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def snake_case(weights, torch_layer, hidden_size):
    """Copy trax LSH self-attention weights (query_key, value, output dense) into ``torch_layer``.

    Duplicate ``UpperCamelCase__`` parameters (SyntaxError) replaced with names
    matching the original Reformer conversion script. NOTE(review): ``set_param``
    is defined above under an obfuscated name -- verify helper names file-wide.
    """
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    # trax stores per-head matrices; flatten head/hidden dims for torch's Linear.
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def snake_case(weights, torch_layer, hidden_size):
    """Copy trax local self-attention weights (query, key, value, output dense) into ``torch_layer``.

    Duplicate ``UpperCamelCase__`` parameters (SyntaxError) replaced with names
    matching the original Reformer conversion script. NOTE(review): ``set_param``
    is defined above under an obfuscated name -- verify helper names file-wide.
    """
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def snake_case(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention + feed-forward) into ``torch_block``.

    Duplicate ``UpperCamelCase__`` parameters (SyntaxError) replaced with names
    matching the original conversion script. NOTE(review): the helpers called
    below (``set_param``, ``set_layer_weights_in_torch_*``) are defined above
    under obfuscated names -- verify helper names file-wide.
    """
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output: fewer than 4 matrices means LSH attention, else local.
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward wraps the real weights one level deeper.
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def snake_case(weights, torch_model, hidden_size):
    """Load the full trax Reformer weight tree into ``torch_model`` (a ReformerModelWithLMHead).

    Duplicate ``UpperCamelCase__`` parameters (SyntaxError) replaced with names
    matching the original conversion script. NOTE(review): the helpers called
    below (``set_param``, ``set_block_weights_in_torch``) are defined above
    under obfuscated names -- verify helper names file-wide.
    """
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    # Axial position embeddings come as a tuple of factorized weight arrays.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    # Each torch layer consumes 4 consecutive trax weight entries.
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def snake_case(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint into a PyTorch state dict on disk.

    Duplicate ``UpperCamelCase__`` parameters (SyntaxError) replaced with the
    argument names declared by the argparse block below.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)
    # SECURITY: pickle.load executes arbitrary code -- only run on trusted checkpoints.
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]
    # NOTE(review): ``set_model_weights_in_torch`` is defined above under an
    # obfuscated name -- verify helper names file-wide.
    set_model_weights_in_torch(model_weights, model, config.hidden_size)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # Command-line entry point for the trax -> PyTorch Reformer conversion.
    # NOTE(review): the parser is assigned to ``__lowerCamelCase`` but used below
    # as ``parser``/``args``, and ``convert_trax_checkpoint_to_pytorch`` is not
    # defined under that name in this file -- obfuscation artifacts; verify.
    __lowerCamelCase :Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained Reformer model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    __lowerCamelCase :Optional[int] = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 42
| 0
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class A__ ( unittest.TestCase):
    """simple docstring"""

    # Tests for BarkProcessor: save/load round-trips, speaker-embedding (voice
    # preset) handling, and tokenizer equivalence.
    # NOTE(review): all methods below were flattened to the same name ``a__``
    # (only the last definition survives at class-creation time) -- the original
    # setUp/tearDown/test_* names must be restored for unittest to run these.
    def a__ ( self: Dict )-> Optional[Any]:
        # Fixture values: checkpoint id, temp dir, voice preset id, sample text,
        # and speaker-embedding paths (assigned to obfuscated locals; presumably
        # these were ``self.checkpoint`` etc. -- the other methods read those
        # attributes).
        lowerCamelCase : Any = """ylacombe/bark-small"""
        lowerCamelCase : int = tempfile.mkdtemp()
        lowerCamelCase : int = """en_speaker_1"""
        lowerCamelCase : List[str] = """This is a test string"""
        lowerCamelCase : Optional[Any] = """speaker_embeddings_path.json"""
        lowerCamelCase : int = """speaker_embeddings"""
    # Helper: build a tokenizer from ``self.checkpoint`` with optional kwargs.
    def a__ ( self: Optional[Any] , **__a: Any )-> Optional[Any]:
        return AutoTokenizer.from_pretrained(self.checkpoint , **__a )
    # Teardown: drop the temp directory created above.
    def a__ ( self: List[str] )-> Optional[Any]:
        shutil.rmtree(self.tmpdirname )
    # Round-trip: save_pretrained + from_pretrained preserves the tokenizer vocab.
    def a__ ( self: Optional[Any] )-> Any:
        lowerCamelCase : Optional[Any] = self.get_tokenizer()
        lowerCamelCase : Optional[int] = BarkProcessor(tokenizer=__a )
        processor.save_pretrained(self.tmpdirname )
        lowerCamelCase : Any = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
    # Round-trip with speaker embeddings and overridden special tokens.
    @slow
    def a__ ( self: Dict )-> List[str]:
        lowerCamelCase : str = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        lowerCamelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowerCamelCase : Optional[Any] = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
    # Voice presets: pass a dict, an .npz file on disk, and a hub id.
    def a__ ( self: int )-> Optional[Any]:
        lowerCamelCase : int = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        lowerCamelCase : str = 35
        lowerCamelCase : Tuple = 2
        lowerCamelCase : Any = 8
        lowerCamelCase : Dict = {
            """semantic_prompt""": np.ones(__a ),
            """coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
            """fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        lowerCamelCase : Optional[int] = processor(text=self.input_string , voice_preset=__a )
        lowerCamelCase : Tuple = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname , """file.npz""" )
        np.savez(__a , **__a )
        lowerCamelCase : Any = processor(text=self.input_string , voice_preset=__a )
        lowerCamelCase : int = inputs["""history_prompt"""]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(__a , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        lowerCamelCase : Any = processor(text=self.input_string , voice_preset=self.voice_preset )
    # Processor tokenization must match the underlying tokenizer's output.
    def a__ ( self: int )-> Union[str, Any]:
        lowerCamelCase : Optional[int] = self.get_tokenizer()
        lowerCamelCase : Dict = BarkProcessor(tokenizer=__a )
        lowerCamelCase : List[Any] = processor(text=self.input_string )
        lowerCamelCase : Union[str, Any] = tokenizer(
            self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=__a , return_attention_mask=__a , return_token_type_ids=__a , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 711
|
"""simple docstring"""
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class A__ (nn.Module):
    """Tiny Linear -> BatchNorm -> Linear network used by the hook tests below."""

    def __init__(self):
        super().__init__()
        self.lineara = nn.Linear(3, 4)
        # Bug fix: ``nn.BatchNormad`` does not exist in torch; the intended
        # 1-D batch-norm over 4 features is ``nn.BatchNorm1d``.
        self.batchnorm = nn.BatchNorm1d(4)
        # NOTE(review): both Linear layers were collapsed onto the same
        # ``lineara`` attribute (this assignment overwrites the one above),
        # so ``forward`` cannot run as written -- presumably these were two
        # distinct attributes originally; verify against the upstream test.
        self.lineara = nn.Linear(4, 5)

    def forward(self, x):
        # Renamed from an obfuscated placeholder to ``forward`` so that
        # nn.Module.__call__ dispatches here.
        return self.lineara(self.batchnorm(self.lineara(x)))
class A__ (ModelHook):
    """Hook that increments the first positional argument before the forward pass.

    Fixes: the base class ``__lowercase`` is undefined (``ModelHook`` is
    imported at the top of this file), and the method declared duplicate
    ``__a`` parameters (SyntaxError) while its body read ``args``/``kwargs``.
    The method name ``pre_forward`` matches accelerate's ModelHook API --
    NOTE(review): confirm against the original test file.
    """

    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs
class A__ (ModelHook):
    """Hook that adds one to the module output after the forward pass.

    Fixes: the base class ``__lowercase`` is undefined (``ModelHook`` is
    imported at the top of this file), the two parameters were both named
    ``__a`` (SyntaxError), and the body read an unbound ``output``. The method
    name ``post_forward`` matches accelerate's ModelHook API -- NOTE(review):
    confirm against the original test file.
    """

    def post_forward(self, module, output):
        return output + 1
class A__ ( unittest.TestCase):
    """simple docstring"""

    # Tests for accelerate's hook machinery: attaching/removing hooks, chaining
    # pre/post-forward hooks, and AlignDevicesHook device placement / offload.
    # NOTE(review): every method below was flattened to the name ``a__`` and the
    # bodies reference names (``ModelForTest``, ``PreForwardHook``,
    # ``PostForwardHook``, and the placeholder ``__a``) that are not defined
    # under those names in this file -- obfuscation artifacts; verify against
    # the original accelerate test suite.
    # Attach a ModelHook, check the bookkeeping attributes, then detach.
    def a__ ( self: int )-> str:
        lowerCamelCase : List[str] = ModelForTest()
        lowerCamelCase : Dict = ModelHook()
        add_hook_to_module(__a , __a )
        self.assertEqual(test_model._hf_hook , __a )
        self.assertTrue(hasattr(__a , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__a )
        self.assertFalse(hasattr(__a , """_hf_hook""" ) )
        self.assertFalse(hasattr(__a , """_old_forward""" ) )
    # Appending a second hook must wrap both in a SequentialHook.
    def a__ ( self: int )-> str:
        lowerCamelCase : List[str] = ModelForTest()
        lowerCamelCase : Union[str, Any] = ModelHook()
        add_hook_to_module(__a , __a )
        add_hook_to_module(__a , __a , append=__a )
        self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a )
        self.assertEqual(len(test_model._hf_hook.hooks ) , 2 )
        self.assertTrue(hasattr(__a , """_old_forward""" ) )
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__ , """forward""" )
        self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] )
        remove_hook_from_module(__a )
        self.assertFalse(hasattr(__a , """_hf_hook""" ) )
        self.assertFalse(hasattr(__a , """_old_forward""" ) )
    # Pre-forward hooks shift the input; re-adding replaces, SequentialHook chains.
    def a__ ( self: List[Any] )-> List[str]:
        lowerCamelCase : str = ModelForTest()
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : Union[str, Any] = test_model(x + 1 )
        lowerCamelCase : Optional[int] = test_model(x + 2 )
        lowerCamelCase : List[Any] = PreForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[int] = test_model(__a )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCamelCase : Dict = PreForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Tuple = test_model(__a )
        self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() )
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[Any] = test_model(__a )
        assert torch.allclose(__a , __a , atol=1e-5 )
    # Post-forward hooks shift the output; same replace/chain semantics.
    def a__ ( self: Any )-> Optional[int]:
        lowerCamelCase : str = ModelForTest()
        lowerCamelCase : List[str] = torch.randn(2 , 3 )
        lowerCamelCase : int = test_model(__a )
        lowerCamelCase : Dict = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Tuple = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # Attaching a hook to a model when it already has one replaces, does not chain
        lowerCamelCase : str = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : Optional[Any] = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) )
        # You need to use the sequential hook to chain two or more hooks
        lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() )
        add_hook_to_module(__a , __a )
        lowerCamelCase : str = test_model(__a )
        assert torch.allclose(__a , output + 2 , atol=1e-5 )
    # The hook's no_grad flag must control whether outputs require grad.
    def a__ ( self: int )-> Dict:
        lowerCamelCase : List[Any] = ModelForTest()
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
        lowerCamelCase : List[str] = test_model(__a )
        lowerCamelCase : Any = PostForwardHook()
        add_hook_to_module(__a , __a )
        lowerCamelCase : str = test_model(__a )
        self.assertTrue(torch.allclose(__a , output + 1 ) )
        self.assertTrue(outputa.requires_grad )
        lowerCamelCase : Optional[int] = True
        lowerCamelCase : Optional[int] = test_model(__a )
        self.assertFalse(outputa.requires_grad )
    # AlignDevicesHook spreads submodules across two GPUs; io_same_device
    # forces the output back onto the input's device.
    @require_multi_gpu
    def a__ ( self: List[str] )-> Union[str, Any]:
        lowerCamelCase : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) )
        self.assertEqual(model.lineara.weight.device , torch.device(1 ) )
        # We can still make a forward pass. The input does not need to be on any particular device
        lowerCamelCase : str = torch.randn(2 , 3 )
        lowerCamelCase : Dict = model(__a )
        self.assertEqual(output.device , torch.device(1 ) )
        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) )
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 )
        lowerCamelCase : str = model(__a )
        self.assertEqual(output.device , torch.device(0 ) )
    # Per-module AlignDevicesHook with CPU offload: params on meta, buffers on
    # the execution device; removing the hooks restores the weights.
    def a__ ( self: List[str] )-> Tuple:
        lowerCamelCase : Union[str, Any] = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True}
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Optional[Any] = torch.randn(2 , 3 )
        lowerCamelCase : Optional[Any] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        lowerCamelCase : Any = {
            """execution_device""": 0 if torch.cuda.is_available() else """cpu""",
            """offload""": True,
            """offload_buffers""": True,
        }
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) )
        add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : int = torch.randn(2 , 3 )
        lowerCamelCase : Optional[int] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara )
        remove_hook_from_module(model.batchnorm )
        remove_hook_from_module(model.lineara )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    # Same offload behavior via the attach_align_device_hook recursive helper.
    def a__ ( self: Any )-> List[str]:
        lowerCamelCase : int = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(__a , execution_device=__a , offload=__a )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(__a )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : Optional[Any] = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : Optional[int] = torch.randn(2 , 3 )
        lowerCamelCase : int = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
    # Same again, offloading into an explicit weights_map (the state dict).
    def a__ ( self: Optional[Any] )-> List[Any]:
        lowerCamelCase : List[Any] = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # This will move each submodule on different devices
        lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu"""
        attach_align_device_hook(
            __a , execution_device=__a , offload=__a , weights_map=model.state_dict() )
        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        # Buffers are not included in the offload by default, so are on the execution device
        lowerCamelCase : List[Any] = torch.device(__a )
        self.assertEqual(model.batchnorm.running_mean.device , __a )
        lowerCamelCase : Dict = torch.randn(2 , 3 )
        lowerCamelCase : int = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        # Now test with buffers included in the offload
        attach_align_device_hook(
            __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , )
        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) )
        self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) )
        lowerCamelCase : Tuple = torch.randn(2 , 3 )
        lowerCamelCase : Any = model(__a )
        self.assertEqual(output.device , __a )
        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(__a )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) )
        self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
| 42
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
# Module logger (transformers' logging utility).
logger = logging.get_logger(__name__)

# The constant names below are restored from the references inside the
# tokenizer class in this file (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_INIT_CONFIGURATION); the
# obfuscated version assigned them all to ``__lowerCamelCase``, leaving the
# class-body references dangling.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

# Hub URLs of the vocab file for each pretrained ConvBERT checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

# Maximum input length each checkpoint supports.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

# Per-checkpoint tokenizer defaults.
PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}
class A__ (PreTrainedTokenizerFast):
    """Fast (Rust-backed) WordPiece tokenizer for ConvBERT.

    Fixes applied: the base class ``__lowercase`` is undefined
    (``PreTrainedTokenizerFast`` is imported at the top of this file);
    ``__init__`` declared ten parameters all named ``__a`` (a SyntaxError);
    the class attributes were all collapsed onto ``snake_case__``; method
    names are restored to the PreTrainedTokenizerFast API they override
    (NOTE(review): names inferred from the base-class contract -- confirm).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Re-configure the backend normalizer if the saved tokenizer's settings
        # disagree with the constructor arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (with specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 712
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the Encodec model package: maps submodule name to
# the public names it exports.
# NOTE(review): the structure is assigned to ``__lowerCamelCase`` but consumed
# below as ``_import_structure`` -- an obfuscation artifact; verify.
__lowerCamelCase :Optional[Any] = {
    'configuration_encodec': [
        'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EncodecConfig',
    ],
    'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
# Only register the torch modeling files when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase :Union[str, Any] = [
        'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EncodecModel',
        'EncodecPreTrainedModel',
    ]
# For static type checkers: import everything eagerly so names resolve.
if TYPE_CHECKING:
    from .configuration_encodec import (
        ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
        EncodecConfig,
    )
    from .feature_extraction_encodec import EncodecFeatureExtractor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encodec import (
            ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
            EncodecModel,
            EncodecPreTrainedModel,
        )
# At runtime: replace this module with a lazy proxy that imports on first access.
else:
    import sys
    __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
def snake_case ( UpperCamelCase__ : list[Any] ) -> None:
create_state_space_tree(UpperCamelCase__ , [] , 0 )
def snake_case ( UpperCamelCase__ : list[Any] , UpperCamelCase__ : list[Any] , UpperCamelCase__ : int ) -> None:
if index == len(UpperCamelCase__ ):
print(UpperCamelCase__ )
return
create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(UpperCamelCase__ , UpperCamelCase__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__lowerCamelCase :list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
| 713
|
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
"""Test helper that builds a tiny ConvNext config plus dummy pixel inputs and runs
shape assertions for ConvNextModel / ConvNextForImageClassification / ConvNextBackbone.

NOTE(review): this file was machine-obfuscated. Several signatures below repeat the
parameter name ``__a`` (not valid Python) and bodies reference pre-mangling names
(``parent``, ``batch_size``, ...) that no longer exist. Code is reproduced
byte-for-byte; only comments/docstrings were added. TODO: restore real names.
"""
# Constructor: stores all test knobs (batch size, image size, stage sizes, ...).
def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]:
lowerCamelCase : Optional[int] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Any = image_size
lowerCamelCase : Tuple = num_channels
lowerCamelCase : str = num_stages
lowerCamelCase : List[str] = hidden_sizes
lowerCamelCase : str = depths
lowerCamelCase : Dict = is_training
lowerCamelCase : Optional[Any] = use_labels
lowerCamelCase : List[str] = intermediate_size
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : List[str] = num_labels
lowerCamelCase : Union[str, Any] = initializer_range
lowerCamelCase : List[Any] = out_features
lowerCamelCase : Optional[Any] = out_indices
lowerCamelCase : int = scope
# Build random pixel_values (and labels when enabled) plus a config.
def a__ ( self: str )-> Optional[Any]:
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Dict = None
if self.use_labels:
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels )
lowerCamelCase : Any = self.get_config()
return config, pixel_values, labels
# Construct the tiny ConvNextConfig from the stored knobs.
def a__ ( self: Dict )-> Union[str, Any]:
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
# Base-model check: last hidden state has shape (B, C_last, H // 32, W // 32).
def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]:
lowerCamelCase : Optional[int] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
# Classification-head check: logits are (B, num_labels).
def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]:
lowerCamelCase : str = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
# Backbone check: feature maps / channels match out_features, and the default
# (out_features=None) yields only the last stage.
def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]:
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : int = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowerCamelCase : Tuple = None
lowerCamelCase : List[str] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : List[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
# Adapter used by the common test mixin: returns (config, inputs_dict).
def a__ ( self: Optional[Any] )-> Any:
lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs
lowerCamelCase : int = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""Model tests for ConvNext (model / classification / backbone variants).

NOTE(review): machine-obfuscated file — the base list repeats ``__lowercase``
(duplicate base) and several bodies reference mangled names; reproduced
byte-for-byte, comments only added.
"""
# Model classes / pipeline mapping exercised by the common mixins.
snake_case__ : int =(
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : str =(
{'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
if is_torch_available()
else {}
)
# Feature flags consumed by the common ModelTesterMixin.
snake_case__ : Union[str, Any] =True
snake_case__ : Optional[int] =False
snake_case__ : Tuple =False
snake_case__ : Union[str, Any] =False
snake_case__ : Tuple =False
# setUp: build the model tester and config tester used by every test.
def a__ ( self: Optional[Any] )-> Union[str, Any]:
lowerCamelCase : Tuple = ConvNextModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
# Run the common config serialization/creation checks.
def a__ ( self: Optional[int] )-> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
# Intentionally empty: no extra common-property checks for ConvNext.
def a__ ( self: Optional[int] )-> Optional[Any]:
return
@unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
def a__ ( self: int )-> Dict:
pass
@unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
def a__ ( self: Dict )-> Optional[Any]:
pass
@unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
def a__ ( self: int )-> List[Any]:
pass
# forward() signature must start with pixel_values.
def a__ ( self: Union[str, Any] )-> int:
lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Optional[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
# Delegate the base-model shape check to the model tester.
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
# Delegate the backbone check to the model tester.
def a__ ( self: str )-> int:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
# Hidden-states output: one map per stage (+ embeddings), each H//4 x W//4 first.
def a__ ( self: int )-> Optional[int]:
def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ):
lowerCamelCase : str = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase : Tuple = True
check_hidden_states_output(__a , __a , __a )
# Delegate the classification-head check to the model tester.
def a__ ( self: Dict )-> Optional[Any]:
lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
# Smoke test: pretrained checkpoints load without error.
@slow
def a__ ( self: Optional[Any] )-> Tuple:
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : str = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case():
    """Load and return the standard COCO cats fixture image used by the
    integration tests below.

    Fixes: the mangled body assigned the opened image to an unused local and
    returned the undefined name ``image`` (NameError).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""Integration test: run the pretrained convnext-tiny-224 checkpoint on the COCO
fixture image and compare logits against recorded reference values."""
@cached_property
def a__ ( self: Dict )-> Union[str, Any]:
return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
@slow
def a__ ( self: List[str] )-> Dict:
lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a )
lowerCamelCase : Dict = self.default_image_processor
lowerCamelCase : Union[str, Any] = prepare_img()
lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# forward pass
with torch.no_grad():
lowerCamelCase : Any = model(**__a )
# verify the logits
lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@require_torch
class A__ ( unittest.TestCase , __lowercase):
"""Backbone-specific test suite for ConvNext, driven by BackboneTesterMixin."""
snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else ()
snake_case__ : Optional[Any] =ConvNextConfig
snake_case__ : Optional[Any] =False
def a__ ( self: List[str] )-> int:
# NOTE(review): mangled — the tester is bound to a throwaway local instead of
# self.model_tester, so the mixin cannot see it. TODO restore the attribute.
lowerCamelCase : Dict = ConvNextModelTester(self )
| 42
| 0
|
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A__ :
"""Test helper that builds a tiny ViTMAE config plus dummy pixel inputs and runs
shape assertions for ViTMAEModel / ViTMAEForPreTraining.

NOTE(review): machine-obfuscated — signatures repeat ``__a`` (invalid Python)
and bodies use pre-mangling names. Reproduced byte-for-byte; comments only.
"""
# Constructor: stores test knobs and derives the masked sequence length.
def __init__( self: Optional[int] , __a: str , __a: Union[str, Any]=13 , __a: str=30 , __a: int=2 , __a: Any=3 , __a: Optional[int]=True , __a: Union[str, Any]=True , __a: Optional[int]=32 , __a: Any=5 , __a: Optional[Any]=4 , __a: Optional[int]=37 , __a: Optional[int]="gelu" , __a: Optional[Any]=0.1 , __a: Dict=0.1 , __a: Any=10 , __a: Union[str, Any]=0.02 , __a: str=3 , __a: Dict=0.6 , __a: Optional[Any]=None , )-> Union[str, Any]:
lowerCamelCase : str = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : Optional[Any] = image_size
lowerCamelCase : List[str] = patch_size
lowerCamelCase : Union[str, Any] = num_channels
lowerCamelCase : Optional[Any] = is_training
lowerCamelCase : Optional[int] = use_labels
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : Optional[int] = num_attention_heads
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : str = attention_probs_dropout_prob
lowerCamelCase : List[str] = type_sequence_label_size
lowerCamelCase : int = initializer_range
lowerCamelCase : Any = mask_ratio
lowerCamelCase : Optional[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2
lowerCamelCase : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
# Build random pixel_values (and labels when enabled) plus a config.
def a__ ( self: Tuple )-> str:
lowerCamelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : Union[str, Any] = None
if self.use_labels:
lowerCamelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
# Construct the tiny ViTMAEConfig from the stored knobs.
def a__ ( self: Optional[Any] )-> Union[str, Any]:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
# Base-model check: last hidden state is (B, masked_seq_len, hidden_size).
def a__ ( self: Any , __a: Dict , __a: Any , __a: int )-> Union[str, Any]:
lowerCamelCase : Any = ViTMAEModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Pretraining-head check: decoder logits per patch, plus a greyscale variant.
def a__ ( self: Union[str, Any] , __a: Any , __a: Union[str, Any] , __a: Optional[Any] )-> Tuple:
lowerCamelCase : str = ViTMAEForPreTraining(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
lowerCamelCase : Dict = (self.image_size // self.patch_size) ** 2
lowerCamelCase : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowerCamelCase : int = 1
lowerCamelCase : str = ViTMAEForPreTraining(__a )
model.to(__a )
model.eval()
lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase : List[str] = model(__a )
lowerCamelCase : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# Adapter used by the common test mixin: returns (config, inputs_dict).
def a__ ( self: Union[str, Any] )-> List[Any]:
lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCamelCase : int = config_and_inputs
lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""Model tests for ViTMAE. Because the model draws a random patch mask each
forward pass, several common tests are skipped and save/load is checked with
manual seeding instead.

NOTE(review): machine-obfuscated — duplicate bases / mangled names reproduced
byte-for-byte; comments only added.
"""
snake_case__ : Union[str, Any] =(ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
snake_case__ : Union[str, Any] ={'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
snake_case__ : int =False
snake_case__ : str =False
snake_case__ : str =False
snake_case__ : Union[str, Any] =False
# setUp: build the model tester and config tester used by every test.
def a__ ( self: Optional[Any] )-> str:
lowerCamelCase : List[Any] = ViTMAEModelTester(self )
lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def a__ ( self: Union[str, Any] )-> Any:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def a__ ( self: Any )-> Optional[Any]:
pass
# Input embeddings are a module; output embeddings are Linear or absent.
def a__ ( self: Optional[Any] )-> Optional[Any]:
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCamelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
# forward() signature must start with pixel_values.
def a__ ( self: int )-> str:
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : List[str] = model_class(__a )
lowerCamelCase : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : List[Any] = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def a__ ( self: int )-> Dict:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: List[str] )-> Optional[Any]:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
# PT/TF equivalence needs an identical noise mask on both sides.
def a__ ( self: str , __a: Union[str, Any] , __a: List[str] , __a: List[str] )-> Any:
# make masks reproducible
np.random.seed(2 )
lowerCamelCase : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowerCamelCase : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowerCamelCase : Optional[int] = torch.from_numpy(__a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowerCamelCase : Any = pt_noise
super().check_pt_tf_models(__a , __a , __a )
# save/load round-trip with manual seeding so the random mask matches.
def a__ ( self: Optional[int] )-> List[str]:
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
model.to(__a )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) )
lowerCamelCase : Dict = outputs[0].cpu().numpy()
lowerCamelCase : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
lowerCamelCase : Dict = model_class.from_pretrained(__a )
model.to(__a )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__a , __a ) )
# Make sure we don't have nans
lowerCamelCase : List[str] = after_outputs[0].cpu().numpy()
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self: Union[str, Any] )-> Union[str, Any]:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self: str )-> Dict:
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self: List[str] )-> Tuple:
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def a__ ( self: Optional[int] )-> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def a__ ( self: Optional[Any] )-> int:
pass
# Smoke test: pretrained checkpoints load without error.
@slow
def a__ ( self: str )-> Any:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : List[str] = ViTMAEModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def snake_case():
    """Load and return the standard COCO cats fixture image used by the
    integration tests below.

    Fixes: the mangled body assigned the opened image to an unused local and
    returned the undefined name ``image`` (NameError).
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class A__ ( unittest.TestCase):
"""Integration test: run pretrained vit-mae-base on the COCO fixture with a
fixed numpy noise mask and compare decoder logits to recorded values."""
@cached_property
def a__ ( self: Any )-> int:
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def a__ ( self: Optional[int] )-> Tuple:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowerCamelCase : int = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(__a )
lowerCamelCase : Tuple = self.default_image_processor
lowerCamelCase : Dict = prepare_img()
lowerCamelCase : List[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowerCamelCase : Tuple = ViTMAEConfig()
lowerCamelCase : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowerCamelCase : Union[str, Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(**__a , noise=torch.from_numpy(__a ).to(device=__a ) )
# verify the logits
lowerCamelCase : int = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , __a )
lowerCamelCase : str = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__a ) , atol=1e-4 ) )
| 714
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[int] = logging.get_logger(__name__)
__lowerCamelCase :List[str] = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class A__ ( __lowercase):
"""Configuration class for REALM models (embedder/encoder/scorer/reader/openqa).

NOTE(review): machine-obfuscated — `__init__` repeats the parameter name
``__a`` (invalid Python) and the body references the original parameter names
(``vocab_size``, ...), which are undefined here. Reproduced byte-for-byte;
comments only added. TODO: restore the real parameter list.
"""
# model_type identifier used by AutoConfig dispatch.
snake_case__ : Optional[Any] ='''realm'''
def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any:
super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a )
# Common config
lowerCamelCase : Optional[Any] = vocab_size
lowerCamelCase : str = max_position_embeddings
lowerCamelCase : Dict = hidden_size
lowerCamelCase : Dict = retriever_proj_size
lowerCamelCase : Optional[Any] = num_hidden_layers
lowerCamelCase : List[str] = num_attention_heads
lowerCamelCase : Tuple = num_candidates
lowerCamelCase : int = intermediate_size
lowerCamelCase : Dict = hidden_act
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : Dict = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Dict = type_vocab_size
lowerCamelCase : Optional[Any] = layer_norm_eps
# Reader config
lowerCamelCase : List[str] = span_hidden_size
lowerCamelCase : Dict = max_span_width
lowerCamelCase : Optional[Any] = reader_layer_norm_eps
lowerCamelCase : Optional[int] = reader_beam_size
lowerCamelCase : List[Any] = reader_seq_len
# Retrieval config
lowerCamelCase : int = num_block_records
lowerCamelCase : Dict = searcher_beam_size
| 42
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__(__lowercase):
    """Segformer-specific additions to the common config tester.

    Fixes: the mangled body built the config into an unused local while
    asserting ``hasattr`` on the undefined (name-mangled) identifier ``__a``,
    which would raise NameError at runtime.
    """

    def a__(self):
        """Check that the hierarchical-encoder attributes exist on a fresh config."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config, """num_attention_heads""" ) )
        self.parent.assertTrue(hasattr(config, """num_encoder_blocks""" ) )
class A__ :
"""Test helper that builds a tiny Segformer config plus dummy pixel inputs and
runs shape/loss assertions for SegformerModel / SegformerForSemanticSegmentation.

NOTE(review): machine-obfuscated — signatures repeat ``__a`` (invalid Python)
and bodies use pre-mangling names. Reproduced byte-for-byte; comments only.
"""
# Constructor: stores test knobs (encoder block/ratio/size lists, dropout, ...).
def __init__( self: int , __a: str , __a: Optional[Any]=13 , __a: Optional[int]=64 , __a: int=3 , __a: Union[str, Any]=4 , __a: Union[str, Any]=[2, 2, 2, 2] , __a: Tuple=[8, 4, 2, 1] , __a: Dict=[16, 32, 64, 128] , __a: int=[1, 4, 8, 16] , __a: Optional[Any]=[1, 2, 4, 8] , __a: int=True , __a: Optional[int]=True , __a: Tuple="gelu" , __a: int=0.1 , __a: List[Any]=0.1 , __a: Optional[int]=0.02 , __a: Optional[int]=3 , __a: str=None , )-> Optional[Any]:
lowerCamelCase : Any = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : List[str] = image_size
lowerCamelCase : str = num_channels
lowerCamelCase : str = num_encoder_blocks
lowerCamelCase : Any = sr_ratios
lowerCamelCase : List[Any] = depths
lowerCamelCase : Any = hidden_sizes
lowerCamelCase : Tuple = downsampling_rates
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : Tuple = is_training
lowerCamelCase : Tuple = use_labels
lowerCamelCase : Optional[int] = hidden_act
lowerCamelCase : List[Any] = hidden_dropout_prob
lowerCamelCase : Tuple = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = initializer_range
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : Tuple = scope
# Build random pixel_values (and per-pixel labels when enabled) plus a config.
def a__ ( self: Tuple )-> int:
lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase : int = None
if self.use_labels:
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowerCamelCase : Tuple = self.get_config()
return config, pixel_values, labels
# Construct the tiny SegformerConfig from the stored knobs.
def a__ ( self: Optional[int] )-> str:
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
# Base-model check: last hidden state downsampled by the final rate * 2.
def a__ ( self: List[str] , __a: str , __a: Dict , __a: Union[str, Any] )-> Tuple:
lowerCamelCase : Union[str, Any] = SegformerModel(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
lowerCamelCase : Tuple = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
# Semantic-segmentation check: logits at H//4 x W//4, positive loss with labels.
def a__ ( self: str , __a: Dict , __a: Tuple , __a: str )-> Optional[Any]:
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : List[Any] = SegformerForSemanticSegmentation(__a )
model.to(__a )
model.eval()
lowerCamelCase : Any = model(__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase : List[str] = model(__a , labels=__a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
# Binary (single-label) segmentation check: loss must be positive.
def a__ ( self: Union[str, Any] , __a: Dict , __a: Any , __a: Dict )-> Optional[Any]:
lowerCamelCase : Optional[Any] = 1
lowerCamelCase : Tuple = SegformerForSemanticSegmentation(config=__a )
model.to(__a )
model.eval()
lowerCamelCase : Optional[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__a )
lowerCamelCase : int = model(__a , labels=__a )
self.parent.assertGreater(result.loss , 0.0 )
# Adapter used by the common test mixin: returns (config, inputs_dict).
def a__ ( self: Optional[Any] )-> Dict:
lowerCamelCase : List[str] = self.prepare_config_and_inputs()
lowerCamelCase : List[Any] = config_and_inputs
lowerCamelCase : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
snake_case__ : Dict =(
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case__ : Optional[int] =True
snake_case__ : Optional[Any] =False
snake_case__ : Dict =False
snake_case__ : List[str] =False
def a__ ( self: Union[str, Any] )-> Tuple:
# NOTE(review): mangled — both testers are bound to throwaway locals (not
# self.model_tester / self.config_tester) and `config_class=__a` is undefined.
# TODO restore the attribute assignments and the SegformerConfig argument.
lowerCamelCase : Optional[Any] = SegformerModelTester(self )
lowerCamelCase : Optional[int] = SegformerConfigTester(self , config_class=__a )
def a__ ( self: Tuple )-> List[str]:
# Run the shared config serialization/creation tests for SegformerConfig.
self.config_tester.run_common_tests()
def a__ ( self: List[Any] )-> str:
# Delegate the base-model output-shape check to the model tester.
lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Dict )-> int:
# Delegate the single-label (binary) segmentation check to the model tester.
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__a )
def a__ ( self: Dict )-> List[Any]:
# Delegate the multi-label semantic-segmentation check to the model tester.
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__a )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def a__ ( self: Union[str, Any] )-> List[Any]:
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def a__ ( self: Tuple )-> int:
pass
def a__ ( self: int )-> int:
# forward() signature must start with pixel_values for every model class.
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase : Any = model_class(__a )
lowerCamelCase : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase : Any = [*signature.parameters.keys()]
lowerCamelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
    def a__ ( self: Dict )-> Optional[Any]:
        # Verify attention outputs: one map per encoder layer, correct
        # first/last-layer shapes, and that requesting attentions adds exactly
        # one extra output.  NOTE(review): obfuscation collapsed the distinct
        # local names into `lowerCamelCase`/`__a`, so the reads below
        # (`outputs`, `attentions`, `inputs_dict`, ...) no longer match the
        # assignments -- TODO restore the original names.
        lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : int = True
        for model_class in self.all_model_classes:
            lowerCamelCase : Union[str, Any] = True
            lowerCamelCase : List[str] = False
            lowerCamelCase : Tuple = True
            lowerCamelCase : int = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase : List[Any] = model(**self._prepare_for_class(__a , __a ) )
            lowerCamelCase : List[Any] = outputs.attentions
            # Expect one attention tensor per layer summed over all blocks.
            lowerCamelCase : Dict = sum(self.model_tester.depths )
            self.assertEqual(len(__a ) , __a )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCamelCase : str = True
            lowerCamelCase : int = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase : str = model(**self._prepare_for_class(__a , __a ) )
            lowerCamelCase : int = outputs.attentions
            self.assertEqual(len(__a ) , __a )
            # verify the first attentions (first block, first layer)
            lowerCamelCase : int = (self.model_tester.image_size // 4) ** 2
            lowerCamelCase : List[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            lowerCamelCase : List[Any] = (self.model_tester.image_size // 32) ** 2
            lowerCamelCase : Any = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            lowerCamelCase : Optional[int] = len(__a )
            # Check attention is always last and order is fine
            lowerCamelCase : Any = True
            lowerCamelCase : int = True
            lowerCamelCase : str = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase : Any = model(**self._prepare_for_class(__a , __a ) )
            self.assertEqual(out_len + 1 , len(__a ) )
            lowerCamelCase : List[Any] = outputs.attentions
            self.assertEqual(len(__a ) , __a )
            # verify the first attentions (first block, first layer)
            lowerCamelCase : Dict = (self.model_tester.image_size // 4) ** 2
            lowerCamelCase : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def a__ ( self: Optional[int] )-> str:
        # Verify hidden-state count (one per encoder block) and the shape of the
        # first block's feature map.  NOTE(review): locals collapsed by
        # obfuscation (`lowerCamelCase`/`__a`) -- TODO restore.
        def check_hidden_states_output(__a: Dict , __a: List[Any] , __a: Any ):
            lowerCamelCase : Any = model_class(__a )
            model.to(__a )
            model.eval()
            with torch.no_grad():
                lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) )
            lowerCamelCase : Tuple = outputs.hidden_states
            lowerCamelCase : List[str] = self.model_tester.num_encoder_blocks
            self.assertEqual(len(__a ) , __a )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase : int = True
            check_hidden_states_output(__a , __a , __a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase : Dict = True
            check_hidden_states_output(__a , __a , __a )
    def a__ ( self: Dict )-> int:
        # Gradient smoke test: each trainable model class must produce a loss
        # that backpropagates.  NOTE(review): locals collapsed by obfuscation.
        if not self.model_tester.is_training:
            return
        lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCamelCase : List[str] = True
        for model_class in self.all_model_classes:
            # Skip classes with no trainable head for this check.
            if model_class in get_values(__a ):
                continue
            lowerCamelCase : Optional[int] = model_class(__a )
            model.to(__a )
            model.train()
            lowerCamelCase : List[Any] = self._prepare_for_class(__a , __a , return_labels=__a )
            lowerCamelCase : str = model(**__a ).loss
            loss.backward()
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def a__ ( self: Optional[Any] )-> Tuple:
        # Intentionally skipped pending a smaller common-test model.
        pass
@slow
def a__ ( self: Any )-> Tuple:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = SegformerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img() -> "Image.Image":
    """Load the standard COCO test-fixture image used by the integration tests.

    Renamed from the obfuscated `snake_case`: every call site in this file
    invokes it as `prepare_img()`.  The previous `-> int` annotation was wrong;
    `Image.open` returns a PIL image.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
class A__ ( unittest.TestCase):
    """Integration tests: run real pretrained SegFormer checkpoints on the COCO
    fixture image and compare logits / post-processed maps against recorded
    values.  NOTE(review): obfuscation collapsed local names into
    `lowerCamelCase` and keyword values into `__a` (undefined here) -- TODO
    restore the original names/flag values.
    """
    @slow
    def a__ ( self: List[Any] )-> Any:
        # only resize + normalize
        lowerCamelCase : Tuple = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a )
        lowerCamelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            __a )
        lowerCamelCase : Union[str, Any] = prepare_img()
        lowerCamelCase : List[str] = image_processor(images=__a , return_tensors="""pt""" )
        lowerCamelCase : Any = encoded_inputs.pixel_values.to(__a )
        with torch.no_grad():
            lowerCamelCase : Dict = model(__a )
        # Logit map at 1/4 input resolution, one channel per label.
        lowerCamelCase : Any = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , __a )
        # Reference 3x3x3 logit corner recorded from the released checkpoint.
        lowerCamelCase : List[str] = torch.tensor(
            [
                [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
                [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
                [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
            ] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-4 ) )
    @slow
    def a__ ( self: Any )-> str:
        # only resize + normalize
        lowerCamelCase : List[Any] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a )
        lowerCamelCase : List[Any] = SegformerForSemanticSegmentation.from_pretrained(
            """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__a )
        lowerCamelCase : Optional[int] = prepare_img()
        lowerCamelCase : str = image_processor(images=__a , return_tensors="""pt""" )
        lowerCamelCase : int = encoded_inputs.pixel_values.to(__a )
        with torch.no_grad():
            lowerCamelCase : Tuple = model(__a )
        lowerCamelCase : Optional[int] = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , __a )
        # Looser tolerance for the cityscapes checkpoint.
        lowerCamelCase : List[Any] = torch.tensor(
            [
                [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
                [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
                [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
            ] ).to(__a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-1 ) )
    @slow
    def a__ ( self: Optional[Any] )-> Tuple:
        # only resize + normalize
        lowerCamelCase : Dict = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a )
        lowerCamelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
            __a )
        lowerCamelCase : Optional[Any] = prepare_img()
        lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" )
        lowerCamelCase : Optional[int] = encoded_inputs.pixel_values.to(__a )
        with torch.no_grad():
            lowerCamelCase : List[Any] = model(__a )
        lowerCamelCase : Dict = outputs.logits.detach().cpu()
        # With target_sizes the segmentation map is resized to the given size;
        # without it, it stays at the model's native 128x128 resolution.
        lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(500, 300)] )
        lowerCamelCase : Union[str, Any] = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , __a )
        lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=__a )
        lowerCamelCase : List[str] = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , __a )
| 715
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)  # module logger
# Pretrained-config archive map: checkpoint name -> config.json URL.
__lowerCamelCase :Any = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
    """GLPN model configuration (obfuscated: originally
    `GLPNConfig(PretrainedConfig)` -- TODO confirm base name).

    Fixes two obfuscation defects: the `__init__` parameters had all been
    renamed to the duplicate `__a` (a SyntaxError), and the `self.` prefix was
    stripped from the attribute assignments so no hyper-parameter was stored.
    Parameter names are restored from the original assignment targets.
    """

    # model_type identifier used by the auto classes.
    model_type = '''glpn'''

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],                 # treated as read-only defaults
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs )
        # Encoder geometry.
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        # Activation / regularisation.
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        # Depth-estimation decoder head.
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
| 42
| 0
|
"""simple docstring"""
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Optional[Any] = logging.get_logger(__name__)  # module logger
# Pretrained-config archive map: checkpoint name -> config.json URL.
__lowerCamelCase :Optional[Any] = {
    'microsoft/xprophetnet-large-wiki100-cased': (
        'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json'
    ),
}
class A__ ( __lowercase):
    """XLM-ProphetNet model configuration (obfuscated: originally
    `XLMProphetNetConfig(PretrainedConfig)` -- TODO confirm base name).

    Fixes obfuscation defects: duplicate `__a` parameter names (SyntaxError),
    `self.` stripped from the attribute assignments, all three class-level
    attributes collapsed into `snake_case__`, and the `num_hidden_layers`
    property renamed to `a__` even though `@num_hidden_layers.setter` below
    requires a property of that exact name.
    """

    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30_522,
        hidden_size: Optional[int] = 1_024,
        encoder_ffn_dim: Optional[int] = 4_096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4_096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        # Total transformer layers across encoder and decoder.
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"""
            """ `num_decoder_layers`.""" )
| 716
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the Newton forward-difference coefficient u*(u-1)*...*(u-p+1).

    Renamed from the obfuscated `snake_case`: `main()` below calls it as
    `ucal(...)`.  For p <= 1 the (empty) product leaves `u` unchanged.

    >>> ucal(1.0, 2)
    0.0
    """
    temp = u
    for i in range(1, p):
        temp *= u - i
    return temp
def main() -> None:
    """Interactively run Newton's forward-difference interpolation.

    Reads the sample count, the x values, the corresponding y values and the
    point to interpolate at, then prints the interpolated value.  Renamed from
    the obfuscated `snake_case` to match the `main()` call in the guard below;
    collapsed local names restored so the reads (`x`, `y`, `value`, `summ`)
    match their assignments again.
    """
    n = int(input("""enter the numbers of values: """ ) )
    # Forward-difference table, n x n; column 0 holds the y samples.
    y: list[list[float]] = [[0] * n for _ in range(n )]
    print("""enter the values of parameters in a list: """ )
    # NOTE(review): the conversion callable was obfuscated away; `float`
    # restored from the arithmetic below -- TODO confirm.
    x = list(map(float, input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input("""enter the value to interpolate: """ ) )
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    # Newton's forward-interpolation series.
    summ = y[0][0]
    for i in range(1, n ):
        summ += (ucal(u, i ) * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )


if __name__ == "__main__":
    main()
| 42
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
__lowerCamelCase :Dict = logging.get_logger(__name__)
class A__ ( __lowercase):
    """M-CTC-T speech feature extractor (obfuscated: originally
    `MCTCTFeatureExtractor(SequenceFeatureExtractor)` -- TODO confirm base).

    Extracts log-mel ("MFSC") spectrogram features from raw speech and
    optionally normalizes them per utterance.  Fixes obfuscation defects:
    duplicate `__a` parameter names (SyntaxError) and `self.` stripped from
    the `__init__` assignments even though every method below reads the
    attributes (`self.win_function`, `self.sample_size`, ...).  Method names
    are grounded by the in-class calls (`self._extract_mfsc_features`,
    `self._normalize_one`, `self.normalize`).
    """

    # model_input_names consumed by the base padding machinery.
    model_input_names = ['''input_features''', '''attention_mask''']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs )
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length    # milliseconds
        self.win_length = win_length    # milliseconds
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Derived frame geometry, in samples.
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000
        self.n_fft = optimal_fft_length(self.sample_size )
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Compute log-mel ("MFSC") features for a single waveform."""
        if self.win_function == "hamming_window":
            # torch-style periodic Hamming window -- TODO confirm `periodic=True`.
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True )
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function )
        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )
        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="""log""",
        )
        # Transpose to (frames, mel bins).
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature array over its valid length."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0 )
            x = np.subtract(x, mean )
        if self.normalize_vars:
            std = x[:input_length].std(axis=0 )
            x = np.divide(x, std )
        if input_length < x.shape[0]:
            # Re-apply the padding value to the padded tail after normalization.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32 )
        return x

    def normalize(self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None) -> List[np.ndarray]:
        """Normalize each utterance over its unpadded length."""
        lengths = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value ) for x, n in zip(input_features, lengths )]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one or more raw waveforms into padded model inputs."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
                """Failing to do so can result in silent errors that might be hard to debug.""" )
        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32 ) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]
        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform ) for one_waveform in raw_speech]
        # convert into correct format for padding
        encoded_inputs = BatchFeature({"""input_features""": features} )
        # NOTE(review): the obfuscated source hid this flag; `True` is needed so
        # the mask is available for `normalize` below -- TODO confirm.
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("""input_features""" )
        if isinstance(input_features[0], list ):
            input_features = [np.asarray(feature, dtype=np.float32 ) for feature in input_features]
        attention_mask = padded_inputs.get("""attention_mask""" )
        if attention_mask is not None:
            attention_mask = [np.asarray(array, dtype=np.int32 ) for array in attention_mask]
        if self.normalize_means or self.normalize_vars:
            # Only trust the mask when real padding was requested.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32 )
                if self._get_padding_strategies(padding, max_length=max_length ) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["""input_features"""] = self.normalize(
                input_features, attention_mask=attention_mask )
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors )
        return padded_inputs
| 717
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Submodule -> public-name map consumed by `_LazyModule` below.  Restores the
# `_import_structure` name (previously undefined: the final call referenced it
# but the dict was assigned to a throwaway name and never populated).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Corrects the misspelled `tokenization_gpt_swa`/`GPTSwaTokenizer` to
        # match the public name registered above.
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Install the lazy proxy so the tokenizer loads on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
# Window width below which both ternary searches fall back to linear scanning.
# It is recommended to keep this number greater than or equal to 10.
# Renamed from the obfuscated `__lowerCamelCase`: the search functions below
# read it as `precision`.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linearly scan array[left:right] (right exclusive) for target.

    Returns the first matching index, or -1 if target is absent.  Renamed
    from the obfuscated `snake_case`: both ternary searches below call it as
    `lin_search(...)`.
    """
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns index or -1.

    Falls back to `lin_search` once the window is narrower than `precision`.
    Renamed from the obfuscated `snake_case` to match the call in the
    ``__main__`` block below; collapsed `left`/`right` locals restored so the
    window actually shrinks.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        # Probe the two cut points that split the window into thirds.
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over sorted array[left..right]; index or -1.

    Renamed from the obfuscated `snake_case`: the body already recurses via
    `rec_ternary_search`, so the definition must carry that name.
    """
    if left < right:
        if right - left < precision:
            # Small window: delegate to the linear scan.
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        if array[two_third] == target:
            return two_third
        if target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        if array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Restores the collapsed script locals: every name below was assigned to
    # `__lowerCamelCase` but read under its original name, and both result
    # prints showed the iterative result.
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(F"""Iterative search: {target} found at positions: {resulta}""")
        print(F"""Recursive search: {target} found at positions: {resultb}""")
    else:
        print('Not found')
| 718
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :Dict = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True) -> None:
    """Port one timm LeViT checkpoint into the HF architecture and save it.

    Renamed from the obfuscated `snake_case`: `convert_weights_and_push`
    below calls it as `convert_weight_and_push(...)`.  Also restores the
    positional weight-copy loop, whose target dict had been collapsed into a
    throwaway name so the converted state dict was never built.
    """
    print(F'Converting {name}...' )
    with torch.no_grad():
        # Pick the matching pretrained timm source model.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("""levit_128s""", pretrained=True )
            else:
                from_model = timm.create_model("""levit_128""", pretrained=True )
        if hidden_sizes == 192:
            from_model = timm.create_model("""levit_192""", pretrained=True )
        if hidden_sizes == 256:
            from_model = timm.create_model("""levit_256""", pretrained=True )
        if hidden_sizes == 384:
            from_model = timm.create_model("""levit_384""", pretrained=True )
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config ).eval()
        # Copy weights positionally: both state dicts enumerate layers in order.
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys() )
        new_keys = list(our_model.state_dict().keys() )
        print(len(og_keys ), len(new_keys ) )
        for i in range(len(og_keys ) ):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights )
        # Sanity-check: both models must agree on a random input.
        x = torch.randn((2, 3, 224, 224) )
        from_logits = from_model(x )
        our_logits = our_model(x ).logits
        assert torch.allclose(from_logits, our_logits ), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name )
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name )
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    """Convert one named LeViT checkpoint (or all of them) and save locally.

    Renamed from the obfuscated `snake_case` to match the call in the
    ``__main__`` block below.  Also restores `ImageNetPreTrainedConfig`,
    which was used at the call sites but never bound, and binds `config` on
    the single-model branch so the final return cannot hit an unbound local.
    Returns the last converted config and the expected logits shape.
    """
    filename = """imagenet-1k-id2label.json"""
    num_labels = 1_000
    expected_shape = (1, num_labels)
    repo_id = """huggingface/label-files"""
    # Label maps shared by all ImageNet-1k checkpoints.
    with open(hf_hub_download(repo_id, filename, repo_type="""dataset""" ), """r""" ) as f:
        idalabel = json.load(f )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # Config factory pre-loaded with the classification label maps.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, idalabel=idalabel, labelaid=labelaid )
    names_to_hidden_sizes = {
        """levit-128S""": 128,
        """levit-128""": 128,
        """levit-192""": 192,
        """levit-256""": 256,
        """levit-384""": 384,
    }
    names_to_config = {
        """levit-128S""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        """levit-128""": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
        """levit-192""": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        """levit-256""": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
        """levit-384""": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub )
    return config, expected_shape
if __name__ == "__main__":
    # Restores the collapsed script locals: `parser`, `args` and
    # `pytorch_dump_folder_path` were read below but assigned to
    # `__lowerCamelCase` by the obfuscation.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 42
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Submodule -> public-name map consumed by `_LazyModule` below.  Restores the
# `_import_structure` name (previously undefined: the final call referenced it
# while every entry was assigned to a throwaway name and lost).  Optional
# backends contribute their entries only when importable.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )
else:
    import sys

    # Install the lazy proxy so submodules load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 719
|
"""simple docstring"""
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class A__ ( __lowercase):
    """Scheduler test suite for ``KDPMaDiscreteScheduler``.

    Runs config round-trip checks and full denoising loops, comparing the
    aggregate statistics of the final sample against device-specific
    reference values.

    Bug fixes vs. the previous revision: both class attributes were assigned
    to the same name (so ``self.scheduler_classes`` / ``self.num_inference_steps``
    did not exist), every method was named ``a__`` (later definitions shadowed
    earlier ones and ``self.get_scheduler_config`` was unresolvable), and the
    method bodies bound results to one throwaway name while reading undefined
    names such as ``scheduler`` and ``sample``.
    """

    # Consumed by the SchedulerCommonTest base class.
    scheduler_classes = (KDPMaDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config; ``kwargs`` override entries."""
        config = {
            "num_train_timesteps": 1_100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        # Previously the dict literal was never bound to ``config`` before
        # ``config.update(...)`` was called (NameError at runtime).
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        """Full denoising loop under ``v_prediction``; check sample statistics."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        """Full denoising loop with the default (epsilon) config."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        """Same loop, but with timesteps placed on the target device."""
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 42
| 0
|
"""simple docstring"""
def snake_case ( density: float , bulk_modulus: float ) -> float:
    """Return the speed of sound in a fluid via the Newton-Laplace relation
    ``sqrt(bulk_modulus / density)``.

    Args:
        density: fluid density (must be > 0).
        bulk_modulus: fluid bulk modulus (must be > 0).

    Raises:
        ValueError: if either argument is non-positive.
    """
    # Bug fix: both parameters were previously named ``UpperCamelCase__`` —
    # a duplicate-argument SyntaxError — while the body read the undefined
    # names ``density`` and ``bulk_modulus``.
    if density <= 0:
        raise ValueError("""Impossible fluid density""" )
    if bulk_modulus <= 0:
        raise ValueError("""Impossible bulk modulus""" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    # Execute this module's doctests when run as a script.
    import doctest
    doctest.testmod()
| 720
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__ ( __lowercase , __lowercase , unittest.TestCase):
    """Fast (CPU) pipeline tests for ``StableDiffusionXLImgaImgPipeline`` built
    from tiny randomly-initialised components.

    NOTE(review): this block is machine-obfuscated and not runnable as-is —
    the dummy-input builder declares two parameters both named ``__a``
    (duplicate argument -> SyntaxError), every test method is named ``a__``
    (later definitions shadow earlier ones), most results are bound to the
    throwaway name ``lowerCamelCase`` while later lines read undefined names
    (``components``, ``sd_pipe``, ``image``, ...), and the multi-line tuple
    target near the end carries an annotation, which is also invalid syntax.
    Documented here without altering the code.
    """

    # Pipeline class under test plus the parameter sets shared with the
    # common pipeline test mixins.
    snake_case__ : str =StableDiffusionXLImgaImgPipeline
    snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''}
    snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS
    snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS

    def a__ ( self: List[str] )-> int:
        # Builds tiny UNet / scheduler / VAE / dual text-encoder components.
        torch.manual_seed(0 )
        lowerCamelCase : Any = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        lowerCamelCase : Any = EulerDiscreteScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
        torch.manual_seed(0 )
        lowerCamelCase : Any = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCamelCase : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , )
        lowerCamelCase : Dict = CLIPTextModel(__a )
        lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : Dict = CLIPTextModelWithProjection(__a )
        lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a )
        lowerCamelCase : str = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """text_encoder_2""": text_encoder_a,
            """tokenizer_2""": tokenizer_a,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]:
        # NOTE(review): duplicate ``__a`` parameters (SyntaxError). Builds a
        # deterministic dummy image + generator + prompt kwargs for the pipe.
        lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
        lowerCamelCase : Any = image / 2 + 0.5
        if str(__a ).startswith("""mps""" ):
            lowerCamelCase : Dict = torch.manual_seed(__a )
        else:
            lowerCamelCase : Tuple = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : Tuple = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs

    def a__ ( self: Dict )-> Optional[Any]:
        # End-to-end smoke test against a hard-coded reference slice.
        lowerCamelCase : Any = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : int = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a )
        lowerCamelCase : Optional[int] = sd_pipe(**__a ).images
        lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def a__ ( self: Optional[int] )-> Union[str, Any]:
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )

    def a__ ( self: Optional[Any] )-> str:
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )

    def a__ ( self: List[str] )-> Optional[Any]:
        # Intentionally empty (overrides a mixin test as a no-op).
        pass

    def a__ ( self: List[Any] )-> Union[str, Any]:
        # Checks that passing pre-computed prompt embeddings matches passing
        # raw prompt strings.
        lowerCamelCase : Tuple = self.get_dummy_components()
        lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a )
        lowerCamelCase : str = sd_pipe.to(__a )
        lowerCamelCase : Any = sd_pipe.to(__a )
        sd_pipe.set_progress_bar_config(disable=__a )
        # forward without prompt embeds
        lowerCamelCase : Dict = self.get_dummy_inputs(__a )
        lowerCamelCase : Any = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Optional[int] = negative_prompt
        lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]]
        lowerCamelCase : List[Any] = sd_pipe(**__a )
        lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        lowerCamelCase : Tuple = self.get_dummy_inputs(__a )
        lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""]
        lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
        # NOTE(review): annotated tuple target below is invalid syntax.
        (
            (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) , (
                lowerCamelCase
            ) ,
        ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a )
        lowerCamelCase : int = sd_pipe(
            **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , )
        lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
    """Slow, GPU-only integration test for the ``stable-diffusion-2-base``
    pipeline, comparing an output slice to hard-coded reference values.

    NOTE(review): machine-obfuscated and not runnable as-is — the input
    builder declares four parameters all named ``__a`` (duplicate argument ->
    SyntaxError), ``torch.floataa`` does not exist (presumably a mangled
    ``torch.float16``/``float64`` — confirm against upstream), and results
    are bound to the throwaway name ``lowerCamelCase`` while later lines
    read undefined names (``pipe``, ``image``, ...). Documented without
    altering the code.
    """

    def a__ ( self: Dict )-> str:
        # unittest teardown hook upstream: free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]:
        # Builds deterministic latents + generator + prompt kwargs.
        lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a )
        lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) )
        lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a )
        lowerCamelCase : int = {
            """prompt""": """a photograph of an astronaut riding a horse""",
            """latents""": latents,
            """generator""": generator,
            """num_inference_steps""": 3,
            """guidance_scale""": 7.5,
            """output_type""": """numpy""",
        }
        return inputs

    def a__ ( self: Optional[int] )-> List[str]:
        # Full pipeline run; the expected slice is a CPU/GPU reference value.
        lowerCamelCase : Tuple = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        lowerCamelCase : Optional[int] = self.get_inputs(__a )
        lowerCamelCase : Optional[Any] = pipe(**__a ).images
        lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 512, 3)
        lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 42
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def snake_case ( resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    """Solve one missing quantity of an electrical impedance triangle.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two via the Pythagorean relation
    ``impedance**2 == resistance**2 + reactance**2``.

    Returns:
        A single-entry dict mapping the solved quantity's name to its value.

    Raises:
        ValueError: unless exactly one argument is 0.
    """
    # Bug fix: all three parameters were previously named ``UpperCamelCase__``
    # (a duplicate-argument SyntaxError) while the body read the undefined
    # names ``resistance``, ``reactance`` and ``impedance``.
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
    # Execute this module's doctests when run as a script.
    import doctest
    doctest.testmod()
| 721
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class A__ :
    """Signature stand-in for a model whose forward-like method declares the
    three ONNX inputs contiguously, in their canonical order. Only the
    signature matters to the tests below; the body is deliberately a no-op.
    """

    def a__ ( self , input_ids , token_type_ids , attention_mask ):
        # Bug fix: the three parameters were all named ``__a`` (duplicate
        # argument -> SyntaxError). The names follow the ordering the
        # ``ensure_valid_input`` test below inspects:
        # (input_ids, token_type_ids, attention_mask).
        return None
class A__ :
    """Signature stand-in for a model whose forward-like method interleaves an
    extra, non-ONNX argument between the ONNX inputs (like ``past`` in GPT-2).
    Only the signature matters to the tests below; the body is a no-op.
    """

    def a__ ( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        # Bug fix: the four parameters were all named ``__a`` (duplicate
        # argument -> SyntaxError). ``some_other_args`` is the name the
        # ``ensure_valid_input`` test below expects to be absent from the
        # provided tokens.
        return None
class A__ ( unittest.TestCase):
    """Tests for the graph-to-ONNX conversion utilities (export, quantization,
    dynamic-axis inference, input reordering).

    NOTE(review): machine-obfuscated and partially broken — the loops iterate
    ``OnnxExportTestCase.MODEL_TO_TEST`` although neither that class name nor
    that attribute exists here (the attribute below is named ``snake_case__``),
    several method bodies read the undefined name ``__a``, results are bound
    to the throwaway name ``lowerCamelCase``, and two helper methods declare
    multiple parameters all named ``__a`` (duplicate argument -> SyntaxError).
    Documented without altering the code.
    """

    # (model_name, model_kwargs) pairs to export.
    snake_case__ : Optional[Any] =[
        # (model_name, model_kwargs)
        ('''bert-base-cased''', {}),
        ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> int:
        # TF export smoke test at opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """tf""" , 12 , **__a )

    @require_torch
    @slow
    def a__ ( self: str )-> int:
        # PyTorch export smoke test at opset 12.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(__a , """pt""" , 12 , **__a )

    @require_torch
    @slow
    def a__ ( self: Union[str, Any] )-> Dict:
        # Export of a locally-saved model with a custom vocabulary.
        from transformers import BertModel

        lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
        with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
            vocab_file.write("""\n""".join(__a ) )
            vocab_file.flush()
            lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name )
        with TemporaryDirectory() as bert_save_dir:
            lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) )
            model.save_pretrained(__a )
            self._test_export(__a , """pt""" , 12 , __a )

    @require_tf
    @slow
    def a__ ( self: Optional[Any] )-> Optional[int]:
        # TF export + quantization; the quantized file must not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a )
            lowerCamelCase : Tuple = quantize(Path(__a ) )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    @require_torch
    @slow
    def a__ ( self: Any )-> Optional[int]:
        # PyTorch export + quantization; the quantized file must not grow.
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a )
            lowerCamelCase : Dict = quantize(__a )
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(__a ).stat().st_size:
                self.fail("""Quantized model is bigger than initial ONNX model""" )

    def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any:
        # NOTE(review): five parameters all named ``__a`` (SyntaxError).
        # Upstream: exports into a temp dir and returns the output path.
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" )
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(__a , __a , __a , __a , __a , **__a )
            return path
        except Exception as e:
            self.fail(__a )

    @require_torch
    @require_tokenizers
    @slow
    def a__ ( self: Tuple )-> Dict:
        from transformers import BertModel

        lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """pt""" )

    @require_tf
    @require_tokenizers
    @slow
    def a__ ( self: Optional[Any] )-> List[Any]:
        from transformers import TFBertModel

        lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
        lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
        self._test_infer_dynamic_axis(__a , __a , """tf""" )

    def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]:
        # NOTE(review): three parameters all named ``__a`` (SyntaxError).
        # Verifies inferred dynamic axes for inputs/outputs of a pipeline.
        lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a )
        lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
        lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a )
        # Assert all variables are present
        self.assertEqual(len(__a ) , len(__a ) )
        self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
        self.assertSequenceEqual(variable_names[:3] , __a )
        self.assertSequenceEqual(variable_names[3:] , __a )
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
        self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )

    def a__ ( self: List[Any] )-> int:
        # Checks argument reordering/trimming done by ``ensure_valid_input``.
        lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""]
        lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
        lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a )
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(__a ) , 3 )
        # Should have exactly the same input names
        self.assertEqual(set(__a ) , set(__a ) )
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a )
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(__a ) , 1 )
        self.assertEqual(len(__a ) , 1 )
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
        self.assertEqual(ordered_input_names[0] , """input_ids""" )

    def a__ ( self: Tuple )-> Tuple:
        # Checks the "-identified" filename generation helper.
        lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
        self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 42
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase :str = logging.get_logger(__name__)
def snake_case ( key , offset , original_name , new_name ) -> str:
    """Rewrite ``key`` by shifting its block number down by ``offset`` and
    swapping ``original_name`` for ``new_name``.

    Example: with offset=1, ``"2.1.mlp.fc1.weight"`` ->
    ``"block.1.1.output.conv1.weight"``.

    Bug fix: the previous revision declared four parameters all named
    ``UpperCamelCase__`` (a duplicate-argument SyntaxError) and read undefined
    names throughout; the parameter order matches the call sites below
    (key, offset, original_name, new_name).
    """
    # The first component of the original name locates block/layer indices
    # inside the dotted key.
    to_find = original_name.split(""".""" )[0]
    key_list = key.split(""".""" )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset
    key = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
    return key
def snake_case ( UpperCamelCase__ : Union[str, Any] ) -> str:
    """Map an original PoolFormer state-dict's keys onto the HF naming scheme.

    NOTE(review): machine-obfuscated and not runnable as-is — the parameter is
    named ``UpperCamelCase__`` yet the loop reads the undefined name
    ``state_dict``; the counter pair ``patch_emb_offset``/``total_embed_found``
    is never defined (the second line binds the tuple ``(0, 0)`` to one
    throwaway name); every per-key result is bound to ``lowerCamelCase``; and
    ``replace_key_with_offset`` does not exist in this module (the helper
    above was renamed to ``snake_case``). Documented without altering the
    code.
    """
    lowerCamelCase : Union[str, Any] = OrderedDict()
    lowerCamelCase : Dict = 0, 0
    for key, value in state_dict.items():
        if key.startswith("""network""" ):
            lowerCamelCase : Any = key.replace("""network""" , """poolformer.encoder""" )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("""bias""" ) and "patch_embed" not in key:
                patch_emb_offset += 1
            lowerCamelCase : List[Any] = key[: key.find("""proj""" )]
            lowerCamelCase : Optional[Any] = key.replace(UpperCamelCase__ , F'patch_embeddings.{total_embed_found}.' )
            lowerCamelCase : str = key.replace("""proj""" , """projection""" )
            if key.endswith("""bias""" ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            lowerCamelCase : Any = """poolformer.encoder.""" + key
        if "mlp.fc1" in key:
            lowerCamelCase : Optional[int] = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """mlp.fc1""" , """output.conv1""" )
        if "mlp.fc2" in key:
            lowerCamelCase : str = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """mlp.fc2""" , """output.conv2""" )
        if "norm1" in key:
            lowerCamelCase : List[Any] = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """norm1""" , """before_norm""" )
        if "norm2" in key:
            lowerCamelCase : Dict = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """norm2""" , """after_norm""" )
        if "layer_scale_1" in key:
            lowerCamelCase : str = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """layer_scale_1""" , """layer_scale_1""" )
        if "layer_scale_2" in key:
            lowerCamelCase : Optional[int] = replace_key_with_offset(UpperCamelCase__ , UpperCamelCase__ , """layer_scale_2""" , """layer_scale_2""" )
        if "head" in key:
            lowerCamelCase : List[str] = key.replace("""head""" , """classifier""" )
        lowerCamelCase : Dict = value
    return new_state_dict
def snake_case ( ):
    """Download the standard COCO verification image used to sanity-check the
    converted model's logits.

    Bug fix: the previous revision bound the opened image to a throwaway name
    and then returned the undefined name ``image`` while passing the undefined
    ``UpperCamelCase__`` as the ``stream`` flag; it also carried a ``Tuple``
    return annotation although ``Tuple`` is not imported here and the function
    returns a PIL image.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def snake_case ( model_name , checkpoint_path , pytorch_dump_folder_path ) -> None:
    """Convert an original PoolFormer checkpoint to the HF Transformers format.

    Args:
        model_name: upstream identifier, e.g. ``"poolformer_s12"``; the last
            three characters ("s12", "s24", "s36", "m36", "m48") select the
            architecture variant.
        checkpoint_path: path to the original PyTorch ``.pth`` state dict.
        pytorch_dump_folder_path: folder the converted model and image
            processor are written to.

    Raises:
        ValueError: if the size suffix is not a supported variant.

    Bug fixes: the previous revision declared three parameters all named
    ``UpperCamelCase__`` (a duplicate-argument SyntaxError), bound every
    result to a throwaway name while reading undefined names (``size``,
    ``idalabel``, ``logits``...), and never actually set the config
    attributes; this restores a working implementation using the same
    constants.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = """huggingface/label-files"""
    size = model_name[-3:]
    config.num_labels = 1000
    filename = """imagenet-1k-id2label.json"""
    expected_shape = (1, 1000)

    # set config attributes (ImageNet-1k label mapping)
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Per-variant depths / hidden sizes / crop percentage; 1e-6 is the
    # layer-scale init used by the deeper variants.
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'Size {size} not supported' )

    # load image processor and prepare the verification image (the previous
    # revision constructed the processor and pixel values twice; once is
    # enough).
    # NOTE(review): ``prepare_img`` and ``rename_keys`` are referenced by
    # their upstream names; in this module the helpers above were renamed to
    # ``snake_case`` — confirm the helper names resolve.
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors="""pt""" ).pixel_values

    logger.info(F'Converting model {model_name}...' )

    # load original state dict and rename keys to the HF naming scheme
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu""" ) )
    state_dict = rename_keys(state_dict )

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()

    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3_0_4_5, -0.6_7_5_8, -0.4_8_6_9] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4_4_0_2, -0.1_3_7_4, -0.8_0_4_5] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6_0_8_0, -0.5_1_3_3, -0.5_8_9_8] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3_9_5_2, 0.2_2_6_3, -1.2_6_6_8] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1_1_6_7, -0.0_6_5_6, -0.3_4_2_3] )
    else:
        raise ValueError(F'Size {size} not supported' )

    # verify logits against the reference slice
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )

    # finally, save model and image processor
    logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point for the PoolFormer checkpoint converter.
    # Bug fixes: the parser and parsed args were previously bound to
    # throwaway names (so ``parser.add_argument`` / ``args.model_name`` read
    # undefined names), and the call targeted ``convert_poolformer_checkpoint``
    # which does not exist in this module — the conversion function above is
    # named ``snake_case``.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_name''',
        default='''poolformer_s12''',
        type=str,
        help='''Name of the model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    args = parser.parse_args()
    snake_case(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 700
|
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class A__ ( unittest.TestCase):
"""simple docstring"""
def a__ ( self: Optional[int] )-> Union[str, Any]:
lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60]
lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12]
lowerCamelCase : Union[str, Any] = 100
self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 )
def a__ ( self: str )-> str:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: str )-> List[Any]:
self.assertRaisesRegex(__a , """Weight can not be negative.""" )
def a__ ( self: Any )-> Dict:
self.assertRaisesRegex(__a , """Profit can not be negative.""" )
def a__ ( self: Optional[Any] )-> List[Any]:
self.assertRaisesRegex(__a , """max_weight must greater than zero.""" )
def a__ ( self: Optional[Any] )-> Tuple:
self.assertRaisesRegex(
__a , """The length of profit and weight must be same.""" )
if __name__ == "__main__":
    # Allow running this test module directly via the unittest CLI.
    unittest.main()
| 42
| 0
|
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase :str = logging.getLogger(__name__)
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Any ='''summarization'''
snake_case__ : int =['''loss''']
snake_case__ : Optional[int] =ROUGE_KEYS
snake_case__ : str ='''rouge2'''
    def __init__( self: int , __a: List[Any] , **__a: List[Any] )-> Optional[Any]:
        """Set up the summarization fine-tuning module from hyper-parameters.

        NOTE(review): machine-obfuscated and not runnable as-is — the two
        parameters after ``self`` are both named ``__a`` (duplicate argument
        -> SyntaxError; upstream these are ``hparams`` and ``**kwargs``), and
        most results are bound to the throwaway name ``lowerCamelCase`` while
        later lines read ``hparams`` / ``n_observations_per_split`` directly.
        Documented without altering the code.
        """
        # Dynamic batching (max_tokens_per_batch) and sortish sampling are
        # mutually exclusive, and neither works multi-GPU.
        if hparams.sortish_sampler and hparams.gpus > 1:
            lowerCamelCase : List[Any] = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
            if hparams.sortish_sampler:
                raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
        super().__init__(__a , num_labels=__a , mode=self.mode , **__a )
        use_task_specific_params(self.model , """summarization""" )
        save_git_info(self.hparams.output_dir )
        # Persist run metadata (metrics + pickled hparams) in the output dir.
        lowerCamelCase : str = Path(self.output_dir ) / """metrics.json"""
        lowerCamelCase : Optional[Any] = Path(self.output_dir ) / """hparams.pkl"""
        pickle_save(self.hparams , self.hparams_save_path )
        lowerCamelCase : Optional[int] = 0
        lowerCamelCase : int = defaultdict(__a )
        lowerCamelCase : Union[str, Any] = self.config.model_type
        # FSMT keeps a separate target vocabulary size.
        lowerCamelCase : Optional[Any] = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
        lowerCamelCase : dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        # Per-split observation caps; negative values mean "use everything".
        lowerCamelCase : str = {
            """train""": self.hparams.n_train,
            """val""": self.hparams.n_val,
            """test""": self.hparams.n_test,
        }
        lowerCamelCase : str = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        lowerCamelCase : Optional[Any] = {
            """train""": self.hparams.max_target_length,
            """val""": self.hparams.val_max_target_length,
            """test""": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}'
        assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}'
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        lowerCamelCase : Optional[int] = get_git_info()["""repo_sha"""]
        lowerCamelCase : str = hparams.num_workers
        lowerCamelCase : str = None  # default to config
        # MBart needs an explicit decoder_start_token_id for the target lang.
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , __a ):
            lowerCamelCase : Any = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            lowerCamelCase : List[str] = self.decoder_start_token_id
        lowerCamelCase : str = (
            SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
        )
        lowerCamelCase : Tuple = False
        lowerCamelCase : Tuple = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            lowerCamelCase : Optional[Any] = self.hparams.eval_max_gen_length
        else:
            lowerCamelCase : Optional[Any] = self.model.config.max_length
        lowerCamelCase : Optional[int] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
    """Dump one batch to ``output_dir`` in decoded-text and raw-token form for debugging.

    Fixes obfuscation damage: the parameter was renamed ``__a`` while the body read the
    undefined name ``batch``, and the ``already_saved_batch`` flag / return value were
    assigned to throwaway locals.
    """
    # Decode every non-mask tensor back to text; masks are summarized by their shape only.
    readable_batch = {
        k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
    }
    save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
    save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
    # Checked in _step so the debug dump is written only once per run.
    self.already_saved_batch = True
    return readable_batch
def forward(self, input_ids, **kwargs):
    """Delegate the forward pass to the wrapped Hugging Face model.

    The obfuscated signature ``(*, __a, **__a)`` used the same name for the positional
    argument and the keyword catch-all, which is a SyntaxError in Python.
    """
    return self.model(input_ids, **kwargs)
def ids_to_clean_text(self, generated_ids: List[int]) -> List[str]:
    """Decode generated token ids into whitespace-stripped text strings.

    The obfuscated version passed the id tensor itself as the ``skip_special_tokens`` and
    ``clean_up_tokenization_spaces`` flags; restore the boolean values.
    """
    gen_text = self.tokenizer.batch_decode(
        generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
    )
    return lmap(str.strip, gen_text)
def _step(self, batch: dict) -> Tuple:
    """Compute the (optionally label-smoothed) cross-entropy loss for one batch.

    Returns a one-element tuple ``(loss,)`` consumed via ``zip(self.loss_names, ...)``.
    Obfuscation damage repaired: the body read the undefined name ``batch`` and all
    intermediate results were bound to throwaway locals.
    """
    pad_token_id = self.tokenizer.pad_token_id
    src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
    tgt_ids = batch["labels"]
    # T5-style models shift labels internally via _shift_right; BART-style models use
    # the shared shift_tokens_right helper. Duck-typed to avoid an extra import.
    if hasattr(self.model, "_shift_right"):
        decoder_input_ids = self.model._shift_right(tgt_ids)
    else:
        decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
    if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
        batch["decoder_input_ids"] = decoder_input_ids
        self.save_readable_batch(batch)
    outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
    lm_logits = outputs["logits"]
    if self.hparams.label_smoothing == 0:
        # Same behavior as modeling_bart.py, besides ignoring pad_token_id
        ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
        assert lm_logits.shape[-1] == self.vocab_size
        loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
    else:
        lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
        # label_smoothed_nll_loss returns (loss, nll_loss); only the smoothed loss is kept.
        loss, _ = label_smoothed_nll_loss(
            lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
        )
    return (loss,)
@property
def pad(self) -> int:
    """Tokenizer pad token id; read as ``self.pad`` when counting padding in training_step."""
    return self.tokenizer.pad_token_id
def training_step(self, batch, batch_idx) -> Dict:
    """One training step: compute losses plus padding/token statistics for logging.

    The obfuscated version had two parameters both named ``__a`` (SyntaxError) and
    dropped the log-dict keys; keys restored to match the logged quantities.
    """
    loss_tensors = self._step(batch)
    logs = dict(zip(self.loss_names, loss_tensors))
    # tokens per batch
    logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
    logs["bs"] = batch["input_ids"].shape[0]
    logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
    logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
    # TODO(SS): make a wandb summary metric for this
    return {"loss": loss_tensors[0], "log": logs}
def validation_step(self, batch, batch_idx) -> Dict:
    """Lightning validation hook — delegates to the shared generative step."""
    return self._generative_step(batch)
def validation_epoch_end(self, outputs, prefix="val") -> Dict:
    """Aggregate per-step outputs into epoch-level losses/metrics and log them.

    Also appended to ``self.metrics[prefix]`` so the logging callback can persist them.
    Repairs the obfuscated duplicate parameters and the throwaway-local assignments.
    """
    self.step_count += 1
    losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
    loss = losses["loss"]
    generative_metrics = {
        k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
    }
    # The monitored metric may be generative (bleu/rouge) or a loss component.
    metric_val = (
        generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
    )
    metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
    generative_metrics.update({k: v.item() for k, v in losses.items()})
    losses.update(generative_metrics)
    all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
    all_metrics["step_count"] = self.step_count
    self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
    preds = flatten_list([x["preds"] for x in outputs])
    return {
        "log": all_metrics,
        "preds": preds,
        f"{prefix}_loss": loss,
        f"{prefix}_{self.val_metric}": metric_tensor,
    }
def calc_generative_metrics(self, preds, target) -> Dict:
    """ROUGE between predictions and references (summarization default; overridden for translation)."""
    return calculate_rouge(preds, target)
def _generative_step(self, batch: dict) -> dict:
    """Generate predictions for *batch* and collect loss + generation metrics.

    Obfuscation damage repaired: body read the undefined names ``batch``/``ta``/``preds``,
    and ``np.mean(lmap(__a, __a))`` lost the ``len`` callable.
    """
    ta = time.time()
    # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
    generated_ids = self.model.generate(
        batch["input_ids"],
        attention_mask=batch["attention_mask"],
        use_cache=True,
        decoder_start_token_id=self.decoder_start_token_id,
        num_beams=self.eval_beams,
        max_length=self.eval_max_length,
    )
    gen_time = (time.time() - ta) / batch["input_ids"].shape[0]
    preds: List[str] = self.ids_to_clean_text(generated_ids)
    target: List[str] = self.ids_to_clean_text(batch["labels"])
    loss_tensors = self._step(batch)
    base_metrics = dict(zip(self.loss_names, loss_tensors))
    rouge: Dict = self.calc_generative_metrics(preds, target)
    summ_len = np.mean(lmap(len, preds))
    base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
    return base_metrics
def test_step(self, batch, batch_idx):
    """Lightning test hook — delegates to the shared generative step."""
    return self._generative_step(batch)
def test_epoch_end(self, outputs):
    """Aggregate test outputs with the validation logic, logged under the 'test' prefix."""
    return self.validation_epoch_end(outputs, prefix="test")
def get_dataset(self, type_path: str) -> SeqaSeqDataset:
    """Build the dataset for one split ('train'/'val'/'test').

    Uses the per-split observation count and target-length limits prepared in __init__.
    """
    n_obs = self.n_obs[type_path]
    max_target_length = self.target_lens[type_path]
    dataset = self.dataset_class(
        self.tokenizer,
        type_path=type_path,
        n_obs=n_obs,
        max_target_length=max_target_length,
        **self.dataset_kwargs,
    )
    return dataset
def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
    """Build a DataLoader for one split, honoring the sortish / dynamic-batch samplers.

    The obfuscated signature declared three parameters all named ``__a`` (SyntaxError);
    names restored from the body's reads.
    """
    dataset = self.get_dataset(type_path)
    if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
        # Sortish sampling already provides (noisy) ordering, so shuffle is disabled.
        sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
        return DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=dataset.collate_fn,
            shuffle=False,
            num_workers=self.num_workers,
            sampler=sampler,
        )
    elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
        batch_sampler = dataset.make_dynamic_sampler(
            self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1
        )
        return DataLoader(
            dataset,
            batch_sampler=batch_sampler,
            collate_fn=dataset.collate_fn,
            num_workers=self.num_workers,
        )
    else:
        return DataLoader(
            dataset,
            batch_size=batch_size,
            collate_fn=dataset.collate_fn,
            shuffle=shuffle,
            num_workers=self.num_workers,
            sampler=None,
        )
def train_dataloader(self) -> DataLoader:
    """Training DataLoader; shuffling enabled (the obfuscated code passed the loader itself as the flag)."""
    dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
    return dataloader
def val_dataloader(self) -> DataLoader:
    """Validation DataLoader (no shuffling)."""
    return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)
def test_dataloader(self) -> DataLoader:
    """Test DataLoader (no shuffling)."""
    return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)
@staticmethod
def add_model_specific_args(parser, root_dir):
    """Register seq2seq-specific CLI arguments on *parser* and return it.

    Restores the name used by callers (``SummarizationModule.add_model_specific_args`` in
    ``__main__``), the duplicate ``__a`` parameters, and the argparse ``type=`` callables
    that the obfuscation replaced with the parser object itself.
    """
    BaseTransformer.add_model_specific_args(parser, root_dir)
    add_generic_args(parser, root_dir)
    parser.add_argument(
        "--max_source_length",
        default=1_024,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--max_target_length",
        default=56,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--val_max_target_length",
        default=142,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--test_max_target_length",
        default=142,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument("--freeze_encoder", action="store_true")
    parser.add_argument("--freeze_embeds", action="store_true")
    parser.add_argument("--sortish_sampler", action="store_true", default=False)
    parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
    parser.add_argument("--max_tokens_per_batch", type=int, default=None)
    parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
    parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
    parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
    parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
    parser.add_argument(
        "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all."
    )
    parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
    parser.add_argument("--src_lang", type=str, default="", required=False)
    parser.add_argument("--tgt_lang", type=str, default="", required=False)
    parser.add_argument("--eval_beams", type=int, default=None, required=False)
    parser.add_argument(
        "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None]
    )
    parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
    parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
    parser.add_argument(
        "--early_stopping_patience",
        type=int,
        default=-1,
        required=False,
        help=(
            "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
            " val_check_interval will effect it."
        ),
    )
    return parser
class TranslationModule(SummarizationModule):
    """Seq2seq fine-tuning module specialized for translation (BLEU validation metric).

    Restores the class name referenced by ``main`` and the class attributes that the
    base module reads (``loss_names``, ``metric_names``, ``default_val_metric``), which
    the obfuscation collapsed into a single shadowed ``snake_case__`` slot.
    """

    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        # Forward language codes to the dataset (consumed via **self.dataset_kwargs).
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        """BLEU between predictions and references."""
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    """Build (or reuse) a module, train it, and optionally test the best checkpoint.

    Restores the name called in ``__main__``, the duplicate parameters, and the local
    bindings (``dataset``, ``logger``, ``es_callback``, ``trainer``) that the obfuscation
    collapsed into throwaway names.
    """
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model,
        args,
        logging_callback=SeqaSeqLoggingCallback(),
        checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better
        ),
        early_stopping_callback=es_callback,
        logger=logger,
    )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        # Record the newest checkpoint and resume from it for the test pass.
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    # Build the CLI: generic Trainer args + seq2seq-specific args, then run training.
    # The obfuscated version bound everything to throwaway names, leaving ``args``
    # undefined at the ``main(args)`` call.
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
    main(args)
| 701
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase :List[str] = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor']
__lowerCamelCase :List[str] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :Optional[Any] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 42
| 0
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : List[Any] =['''flax''']
def __init__( self: List[Any] , *__a: List[str] , **__a: int )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: List[str] , **__a: Union[str, Any] )-> Tuple:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[str] , *__a: Optional[Any] , **__a: str )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : int =['''flax''']
def __init__( self: Union[str, Any] , *__a: Optional[int] , **__a: Optional[int] )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: str , *__a: Any , **__a: Tuple )-> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: int , **__a: List[Any] )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] =['''flax''']
def __init__( self: Tuple , *__a: List[Any] , **__a: List[str] )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: Optional[Any] , **__a: Optional[int] )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: str , *__a: List[str] , **__a: List[Any] )-> Tuple:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Tuple =['''flax''']
def __init__( self: str , *__a: Tuple , **__a: Union[str, Any] )-> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: List[str] , *__a: Any , **__a: Optional[Any] )-> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: Tuple , **__a: List[Any] )-> Any:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Union[str, Any] =['''flax''']
def __init__( self: str , *__a: Optional[int] , **__a: Tuple )-> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: Optional[Any] , **__a: int )-> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: int , **__a: Tuple )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : int =['''flax''']
def __init__( self: int , *__a: Tuple , **__a: Tuple )-> Union[str, Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Tuple , *__a: str , **__a: int )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: List[str] , *__a: Optional[int] , **__a: List[Any] )-> Tuple:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Optional[int] =['''flax''']
def __init__( self: Optional[Any] , *__a: Union[str, Any] , **__a: Dict )-> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Tuple , *__a: str , **__a: Any )-> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[int] , *__a: List[str] , **__a: str )-> List[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Dict =['''flax''']
def __init__( self: Any , *__a: Any , **__a: Tuple )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: List[Any] , *__a: Union[str, Any] , **__a: Dict )-> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: Any , **__a: List[Any] )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Dict =['''flax''']
def __init__( self: Dict , *__a: Tuple , **__a: List[str] )-> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: List[str] , **__a: List[Any] )-> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Union[str, Any] , *__a: Optional[int] , **__a: str )-> Any:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Optional[Any] =['''flax''']
def __init__( self: Optional[int] , *__a: str , **__a: Union[str, Any] )-> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: Tuple , **__a: str )-> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: int , **__a: List[Any] )-> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : List[str] =['''flax''']
def __init__( self: int , *__a: Optional[int] , **__a: List[str] )-> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: List[Any] , **__a: List[Any] )-> Optional[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: Tuple , **__a: Tuple )-> int:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : Tuple =['''flax''']
def __init__( self: Optional[Any] , *__a: int , **__a: Union[str, Any] )-> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Tuple , *__a: List[Any] , **__a: Union[str, Any] )-> List[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Optional[Any] , *__a: List[str] , **__a: Optional[int] )-> Tuple:
requires_backends(cls , ["""flax"""] )
class A__ ( metaclass=__lowercase):
"""simple docstring"""
snake_case__ : str =['''flax''']
def __init__( self: Dict , *__a: Optional[int] , **__a: Dict )-> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def a__ ( cls: Union[str, Any] , *__a: List[Any] , **__a: Union[str, Any] )-> Optional[Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def a__ ( cls: Any , *__a: Optional[int] , **__a: Tuple )-> List[str]:
requires_backends(cls , ["""flax"""] )
| 702
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    """Builds tiny FocalNet configs/inputs and runs shape checks for the unit tests.

    Restores the class name referenced by the test case's ``setUp`` and the method and
    parameter names that the obfuscation collapsed into duplicate ``__a`` arguments
    (SyntaxErrors) while the bodies kept reading the original names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Random pixel values (and labels when enabled) plus a matching tiny config."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        # NOTE(review): ``path_norm`` (not ``patch_norm``) is preserved from the original;
        # it is absorbed by the config's **kwargs — confirm the intended keyword upstream.
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last-hidden-state shape."""
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Check backbone feature maps/channels, with and without ``out_features``."""
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])
        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shape for RGB and greyscale inputs."""
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check logits shape for RGB and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __lowercase , __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : List[str] =(
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
snake_case__ : Optional[int] =(
{'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification}
if is_torch_available()
else {}
)
snake_case__ : Tuple =False
snake_case__ : Dict =False
snake_case__ : Dict =False
snake_case__ : Tuple =False
snake_case__ : Optional[int] =False
def setUp(self):
    """unittest fixture: build the shared model/config testers.

    The obfuscated version bound both testers to throwaway locals, so
    ``self.model_tester`` (read by every test method) was never set.
    """
    self.model_tester = FocalNetModelTester(self)
    self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
def test_config(self):
    """Exercise the standard config round-trip and validation checks."""
    self.create_and_test_config_common_properties()
    self.config_tester.create_and_test_config_to_json_string()
    self.config_tester.create_and_test_config_to_json_file()
    self.config_tester.create_and_test_config_from_and_save_pretrained()
    self.config_tester.create_and_test_config_with_num_labels()
    self.config_tester.check_config_can_be_init_without_params()
    self.config_tester.check_config_arguments_init()
def create_and_test_config_common_properties(self):
    """Intentional no-op hook called by test_config; name restored from its call site."""
    return
def test_model(self):
    """Shape-check the base FocalNet model."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_model(*config_and_inputs)
def test_backbone(self):
    """Shape-check the FocalNet backbone outputs."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_backbone(*config_and_inputs)
def test_for_masked_image_modeling(self):
    """Shape-check the masked-image-modeling head."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)
def test_for_image_classification(self):
    """Shape-check the image-classification head."""
    config_and_inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def a__ ( self: Optional[Any] )-> str:
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def a__ ( self: Optional[Any] )-> Dict:
pass
def test_model_common_attributes(self):
    """Input embeddings must be a Module; output embeddings a Linear (or absent).

    The backbone (last entry of all_model_classes) is excluded, as in the original.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes[:-1]:
        model = model_class(config)
        self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
        x = model.get_output_embeddings()
        self.assertTrue(x is None or isinstance(x, nn.Linear))
def test_forward_signature(self):
    """The first forward() argument of every (non-backbone) model must be pixel_values."""
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes[:-1]:
        model = model_class(config)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["pixel_values"]
        self.assertListEqual(arg_names[:1], expected_arg_names)
def a__ ( self, inputs_dict, config, model_class, image_size ):
    """Shared helper: run a forward pass and verify hidden_states /
    reshaped_hidden_states counts and shapes.

    Fix: the original signature declared four parameters all named ``__a``,
    which is a SyntaxError; parameter roles are reconstructed from how each
    value is used in the body (inputs dict -> _prepare_for_class, config ->
    model construction and patch_size, image_size -> patch count).
    """
    model = model_class(config)
    model.to(torch_device)
    model.eval()

    with torch.no_grad():
        outputs = model(**self._prepare_for_class(inputs_dict, model_class))

    hidden_states = outputs.hidden_states

    expected_num_layers = getattr(
        self.model_tester, """expected_num_hidden_layers""", len(self.model_tester.depths) + 1
    )
    self.assertEqual(len(hidden_states), expected_num_layers)

    # FocalNet has a different seq_length
    patch_size = (
        config.patch_size
        if isinstance(config.patch_size, collections.abc.Iterable)
        else (config.patch_size, config.patch_size)
    )
    num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
    self.assertListEqual(
        list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
    )

    reshaped_hidden_states = outputs.reshaped_hidden_states
    self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

    # Flatten the (B, C, H, W) map back to (B, H*W, C) and compare shapes.
    batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
    reshaped_hidden_states = (
        reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
    )
    self.assertListEqual(
        list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim]
    )
def a__ ( self: Any )-> Any:
    """Exercise hidden-state outputs via both the per-call flag and the config flag.

    Fixes: the config/inputs unpack assigned both values to one local, and the
    two ``True`` assignments never reached ``inputs_dict['output_hidden_states']``
    or ``config.output_hidden_states``, so the checker was called with the
    undefined ``__a`` and nothing was actually enabled.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

    image_size = (
        self.model_tester.image_size
        if isinstance(self.model_tester.image_size, collections.abc.Iterable)
        else (self.model_tester.image_size, self.model_tester.image_size)
    )

    for model_class in self.all_model_classes[:-1]:
        inputs_dict["output_hidden_states"] = True
        self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
def a__ ( self: str )-> Union[str, Any]:
    """Exercise hidden-state outputs when the image size is not a patch-size multiple.

    Fixes: ``3`` was assigned to a throwaway local instead of
    ``config.patch_size`` (the whole point of the test), and the output flags
    were never actually set before calling the shared checker.
    """
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    # Force a patch size that does not evenly divide the image so padding kicks in.
    config.patch_size = 3

    image_size = (
        self.model_tester.image_size
        if isinstance(self.model_tester.image_size, collections.abc.Iterable)
        else (self.model_tester.image_size, self.model_tester.image_size)
    )
    patch_size = (
        config.patch_size
        if isinstance(config.patch_size, collections.abc.Iterable)
        else (config.patch_size, config.patch_size)
    )

    padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
    padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

    for model_class in self.all_model_classes[:-1]:
        inputs_dict["output_hidden_states"] = True
        self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

        # check that output_hidden_states also work using config
        del inputs_dict["output_hidden_states"]
        config.output_hidden_states = True
        self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@slow
def a__ ( self: Optional[int] )-> List[Any]:
    """Smoke-test: the first published checkpoint loads successfully.

    Fix: load from the loop's ``model_name`` and assert on the loaded model
    rather than the undefined ``__a``.
    """
    for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        model = FocalNetModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
def a__ ( self: str )-> Any:
    """With initializer ranges zeroed, every trainable non-embedding weight
    must be exactly 0.0 or 1.0 (i.e. only constant initializers remain).

    Fixes: the config/inputs unpack collapsed into one local, and the
    zero-init config produced by ``_config_zero_init`` was never passed to
    the model constructor (``model_class(config=__a)`` referenced an
    undefined name).
    """
    config, _ = self.model_tester.prepare_config_and_inputs_for_common()

    configs_no_init = _config_zero_init(config)
    for model_class in self.all_model_classes:
        model = model_class(config=configs_no_init)
        for name, param in model.named_parameters():
            if "embeddings" not in name and param.requires_grad:
                # Round through 1e9 to absorb tiny float noise before comparing.
                self.assertIn(
                    ((param.data.mean() * 1e9).round() / 1e9).item(),
                    [0.0, 1.0],
                    msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                )
@require_vision
@require_torch
class A__(unittest.TestCase):
    """Integration tests: run the pretrained FocalNet-tiny classifier on a sample image.

    Fixes: the processor property was named inconsistently with its usage
    (``self.default_image_processor`` referenced a property defined under
    another name), every forward-pass value was fed from the undefined
    ``__a``, and the final argmax check used ``assertTrue(value, 281)``,
    which treats 281 as a failure message and always passes.
    """

    @cached_property
    def default_image_processor(self):
        """Image processor for the checkpoint, or None when vision deps are missing."""
        # TODO update organization
        return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Verify logits shape, a slice of values, and the predicted class id."""
        model = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
        inputs = image_processor(images=image, return_tensors="""pt""").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.21_66, -0.43_68, 0.21_91]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertEqual, not assertTrue(x, 281): the latter always passes.
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class A__ ( __lowercase , unittest.TestCase):
"""simple docstring"""
snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else ()
snake_case__ : Optional[int] =FocalNetConfig
snake_case__ : str =False
def a__ ( self: Union[str, Any] )-> Tuple:
lowerCamelCase : str = FocalNetModelTester(self )
| 42
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.