code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from __future__ import annotations _A : int = """#""" class __snake_case : '''simple docstring''' def __init__( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = {} def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self._trie for char in text: if char not in trie: SCREAMING_SNAKE_CASE__ = {} SCREAMING_SNAKE_CASE__ = trie[char] SCREAMING_SNAKE_CASE__ = True def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self._trie for char in prefix: if char in trie: SCREAMING_SNAKE_CASE__ = trie[char] else: return [] return self._elements(A_ ) def lowercase_ ( self , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [] for c, v in d.items(): SCREAMING_SNAKE_CASE__ = [''' '''] if c == END else [(c + s) for s in self._elements(A_ )] result.extend(A_ ) return tuple(A_ ) _A : Any = Trie() _A : Tuple = ("""depart""", """detergent""", """daring""", """dog""", """deer""", """deal""") for word in words: trie.insert_word(word) def __snake_case ( lowerCAmelCase_ ) -> tuple: SCREAMING_SNAKE_CASE__ = trie.find_word(lowerCAmelCase_ ) return tuple(string + word for word in suffixes ) def __snake_case ( ) -> None: print(autocomplete_using_trie('''de''' ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
100
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ): _lowerCAmelCase :Optional[int] = parent _lowerCAmelCase :Dict = batch_size _lowerCAmelCase 
:Optional[Any] = image_size _lowerCAmelCase :Optional[Any] = patch_size _lowerCAmelCase :List[Any] = num_channels _lowerCAmelCase :Optional[int] = embed_dim _lowerCAmelCase :List[str] = hidden_sizes _lowerCAmelCase :Union[str, Any] = depths _lowerCAmelCase :int = num_heads _lowerCAmelCase :Any = window_size _lowerCAmelCase :List[Any] = mlp_ratio _lowerCAmelCase :Optional[int] = qkv_bias _lowerCAmelCase :Union[str, Any] = hidden_dropout_prob _lowerCAmelCase :Optional[int] = attention_probs_dropout_prob _lowerCAmelCase :Dict = drop_path_rate _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :Tuple = use_absolute_embeddings _lowerCAmelCase :Optional[int] = patch_norm _lowerCAmelCase :Optional[Any] = layer_norm_eps _lowerCAmelCase :Union[str, Any] = initializer_range _lowerCAmelCase :List[str] = is_training _lowerCAmelCase :str = scope _lowerCAmelCase :Optional[int] = use_labels _lowerCAmelCase :List[Any] = type_sequence_label_size _lowerCAmelCase :Union[str, Any] = encoder_stride _lowerCAmelCase :Optional[int] = out_features _lowerCAmelCase :List[str] = out_indices def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase :Dict = None if self.use_labels: _lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase :str = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self: int ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ): _lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowerCAmelCase :Optional[int] = None _lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Any = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify 
channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase :List[Any] = 1 _lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :int = model(_UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size _lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase :Optional[int] = 1 _lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = 
self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs _lowerCAmelCase :List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCamelCase : Optional[Any] = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = False lowerCamelCase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = FocalNetModelTester(self ) _lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): return def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def SCREAMING_SNAKE_CASE__ ( self: str ): pass def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase :Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Tuple = model_class(_UpperCAmelCase ) _lowerCAmelCase :Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase :int = [*signature.parameters.keys()] _lowerCAmelCase :List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _lowerCAmelCase :List[Any] = 
outputs.hidden_states _lowerCAmelCase :str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # FocalNet has a different seq_length _lowerCAmelCase :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _lowerCAmelCase :List[str] = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape _lowerCAmelCase :Optional[int] = ( reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Dict = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common() 
_lowerCAmelCase :str = 3 _lowerCAmelCase :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase :int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :List[str] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Union[str, Any] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) @slow def SCREAMING_SNAKE_CASE__ ( self: int ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase :str = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE__ ( self: Dict ): # TODO update organization 
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.default_image_processor _lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase :Dict = model(**_UpperCAmelCase ) # verify the logits _lowerCAmelCase :str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else () lowerCamelCase : str = FocalNetConfig lowerCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Any = FocalNetModelTester(self )
687
0
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowerCAmelCase__ : Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name class __lowercase (__SCREAMING_SNAKE_CASE ): """simple docstring""" def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7_6_8 ): """simple docstring""" super().__init__(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = proj_size SCREAMING_SNAKE_CASE_ : int = CLIPVisionModel(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = PaintByExampleMapper(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.LayerNorm(config.hidden_size ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling SCREAMING_SNAKE_CASE_ : List[Any] = nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = self.model(pixel_values=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = clip_output.pooler_output SCREAMING_SNAKE_CASE_ : Dict = self.mapper(latent_states[:, None] ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.final_layer_norm(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = self.proj_out(lowerCAmelCase__ ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class __lowercase (nn.Module ): """simple docstring""" def __init__( self , lowerCAmelCase__ ): """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE_ : str = (config.num_hidden_layers + 1) // 5 SCREAMING_SNAKE_CASE_ : Tuple = config.hidden_size SCREAMING_SNAKE_CASE_ : Optional[int] = 1 SCREAMING_SNAKE_CASE_ : Optional[int] = nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , activation_fn='gelu' , attention_bias=lowerCAmelCase__ ) for _ in range(lowerCAmelCase__ ) ] ) def 
UpperCamelCase__ ( self , lowerCAmelCase__ ): """simple docstring""" for block in self.blocks: SCREAMING_SNAKE_CASE_ : Optional[Any] = block(lowerCAmelCase__ ) return hidden_states
101
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a = HfApi() a = {} # fmt: off a = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) a = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) a = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) a = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) a = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) a = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 
0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) a = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) a = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) a = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) a = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) a = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, 
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) a = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) a = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) a = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) a = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on a = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): a = UNetaDModel.from_pretrained(local_checkpoint, 
subfolder="""unet""") else: a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
687
0
"""simple docstring""" # this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.: # python ./utils/get_modified_files.py utils src tests examples # # it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered # since the output of this script is fed into Makefile commands it doesn't print a newline after the results import re import subprocess import sys __magic_name__ : int = subprocess.check_output("""git merge-base main HEAD""".split()).decode("""utf-8""") __magic_name__ : Union[str, Any] = ( subprocess.check_output(f'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode("""utf-8""").split() ) __magic_name__ : Optional[int] = """|""".join(sys.argv[1:]) __magic_name__ : Union[str, Any] = re.compile(Rf'''^({joined_dirs}).*?\.py$''') __magic_name__ : Dict = [x for x in modified_files if regex.match(x)] print(""" """.join(relevant_modified_files), end="""""")
102
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Optional[int] = 10 def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :str = [1, 2, 3, 4] _lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' 
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , [] ) def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Optional[int] = '' _lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , [] ) self.assertEqual(_UpperCAmelCase , [] ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Optional[Any] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) _lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Optional[int] = ['It was the best of times.'] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] ) _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :List[str] = 101 _lowerCAmelCase 
:Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase ) np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
687
0
"""simple docstring""" import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, AutoConfig, AutoImageProcessor, CLIPConfig, CLIPImageProcessor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils''')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_image_processing import CustomImageProcessor # noqa E402 class UpperCAmelCase ( unittest.TestCase ): def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = 0 def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = Path(__lowerCamelCase ) / '''preprocessor_config.json''' _snake_case = Path(__lowerCamelCase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowerCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowerCamelCase , '''w''' ) ) _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : int ): """simple docstring""" # Ensure we can load the image processor from the feature extractor config with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = Path(__lowerCamelCase ) / '''preprocessor_config.json''' _snake_case = Path(__lowerCamelCase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowerCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , 
open(__lowerCamelCase , '''w''' ) ) _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = CLIPConfig() # Create a dummy config file with image_proceesor_type _snake_case = Path(__lowerCamelCase ) / '''preprocessor_config.json''' _snake_case = Path(__lowerCamelCase ) / '''config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowerCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowerCamelCase , '''w''' ) ) # remove image_processor_type to make sure config.json alone is enough to load image processor locally _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase ).to_dict() config_dict.pop('''image_processor_type''' ) _snake_case = CLIPImageProcessor(**__lowerCamelCase ) # save in new folder model_config.save_pretrained(__lowerCamelCase ) config.save_pretrained(__lowerCamelCase ) _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase ) # make sure private variable is not incorrectly saved _snake_case = json.loads(config.to_json_string() ) self.assertTrue('''_processor_class''' not in dict_as_saved ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Any ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = Path(__lowerCamelCase ) / '''preprocessor_config.json''' json.dump( {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowerCamelCase , '''w''' ) , ) _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" with self.assertRaisesRegex( __lowerCamelCase , '''clip-base is not 
a local folder and is not a valid model identifier''' ): _snake_case = AutoImageProcessor.from_pretrained('''clip-base''' ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( __lowerCamelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ): _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase , revision='''aaaaaa''' ) def __UpperCAmelCase ( self : Any ): """simple docstring""" with self.assertRaisesRegex( __lowerCamelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ): _snake_case = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' ) def __UpperCAmelCase ( self : List[Any] ): """simple docstring""" # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__lowerCamelCase ): _snake_case = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) # If remote code is disabled, we can't load this config. with self.assertRaises(__lowerCamelCase ): _snake_case = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowerCamelCase ) _snake_case = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) # Test image processor can be reloaded. 
with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowerCamelCase ) _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase , trust_remote_code=__lowerCamelCase ) self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" try: AutoConfig.register('''custom''' , __lowerCamelCase ) AutoImageProcessor.register(__lowerCamelCase , __lowerCamelCase ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__lowerCamelCase ): AutoImageProcessor.register(__lowerCamelCase , __lowerCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: _snake_case = Path(__lowerCamelCase ) / '''preprocessor_config.json''' _snake_case = Path(__lowerCamelCase ) / '''config.json''' json.dump( {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(__lowerCamelCase , '''w''' ) , ) json.dump({'''model_type''': '''clip'''} , open(__lowerCamelCase , '''w''' ) ) _snake_case = CustomImageProcessor.from_pretrained(__lowerCamelCase ) # Now that the config is registered, it can be used as any other config with the auto-API with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained(__lowerCamelCase ) _snake_case = AutoImageProcessor.from_pretrained(__lowerCamelCase ) self.assertIsInstance(__lowerCamelCase , __lowerCamelCase ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig] def __UpperCAmelCase ( self : Optional[int] ): """simple docstring""" class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ): A__ : int = True try: AutoConfig.register('''custom''' , __lowerCamelCase ) AutoImageProcessor.register(__lowerCamelCase , __lowerCamelCase ) # If remote code is not set, the default is to use 
local _snake_case = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote code is disabled, we load the local one. _snake_case = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(image_processor.is_local ) # If remote is enabled, we load from the Hub _snake_case = AutoImageProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=__lowerCamelCase ) self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' ) self.assertTrue(not hasattr(__lowerCamelCase , '''is_local''' ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content: del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
103
def UpperCamelCase_( __magic_name__ : int ): """simple docstring""" return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("""Program to check whether a number is a Perfect number or not...""") a = int(input("""Enter number: """).strip()) print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
687
0
"""simple docstring""" def _lowerCamelCase ( UpperCAmelCase_ : str = "The quick brown fox jumps over the lazy dog", ) -> bool: """simple docstring""" A__ = set() # Replace all the whitespace in our sentence A__ = input_str.replace(" ", "" ) for alpha in input_str: if "a" <= alpha.lower() <= "z": frequency.add(alpha.lower() ) return len(UpperCAmelCase_ ) == 26 def _lowerCamelCase ( UpperCAmelCase_ : str = "The quick brown fox jumps over the lazy dog", ) -> bool: """simple docstring""" A__ = [False] * 26 for char in input_str: if char.islower(): A__ = True elif char.isupper(): A__ = True return all(UpperCAmelCase_ ) def _lowerCamelCase ( UpperCAmelCase_ : str = "The quick brown fox jumps over the lazy dog", ) -> bool: """simple docstring""" return len({char for char in input_str.lower() if char.isalpha()} ) == 26 def _lowerCamelCase ( ) -> None: """simple docstring""" from timeit import timeit A__ = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest" print(timeit("is_pangram()", setup=UpperCAmelCase_ ) ) print(timeit("is_pangram_faster()", setup=UpperCAmelCase_ ) ) print(timeit("is_pangram_fastest()", setup=UpperCAmelCase_ ) ) # 5.348480500048026, 2.6477354579837993, 1.8470395830227062 # 5.036091582966037, 2.644472333951853, 1.8869528750656173 if __name__ == "__main__": import doctest doctest.testmod() benchmark()
104
from __future__ import annotations from collections.abc import MutableSequence class UpperCAmelCase_ : """simple docstring""" def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ): if len(_UpperCAmelCase ) != degree + 1: raise ValueError( 'The number of coefficients should be equal to the degree + 1.' ) _lowerCAmelCase :list[float] = list(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = degree def __add__( self: str , _UpperCAmelCase: Polynomial ): if self.degree > polynomial_a.degree: _lowerCAmelCase :Any = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , _UpperCAmelCase ) else: _lowerCAmelCase :List[Any] = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , _UpperCAmelCase ) def __sub__( self: str , _UpperCAmelCase: Polynomial ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self: Union[str, Any] ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self: int , _UpperCAmelCase: Polynomial ): _lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ): _lowerCAmelCase :int | float = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self: Union[str, Any] ): _lowerCAmelCase :Dict = '' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 
1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase ) return polynomial def __repr__( self: Optional[Any] ): return self.__str__() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :list[float] = [0] * self.degree for i in range(self.degree ): _lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ): _lowerCAmelCase :list[float] = [0] * (self.degree + 2) _lowerCAmelCase :str = constant for i in range(self.degree + 1 ): _lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , _UpperCAmelCase ) def __eq__( self: List[Any] , _UpperCAmelCase: object ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self: Optional[Any] , _UpperCAmelCase: object ): return not self.__eq__(_UpperCAmelCase )
687
0
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments from transformers.testing_utils import TestCasePlus, require_torch, slow from transformers.utils import is_datasets_available if is_datasets_available(): import datasets class lowerCAmelCase_ ( lowerCamelCase_ ): @slow @require_torch def snake_case ( self ): SCREAMING_SNAKE_CASE_ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny' ,'prajjwal1/bert-tiny' ) SCREAMING_SNAKE_CASE_ : Tuple = BertTokenizer.from_pretrained('bert-base-uncased' ) SCREAMING_SNAKE_CASE_ : Dict = bertabert.config.encoder.vocab_size SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.sep_token_id SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.cls_token_id SCREAMING_SNAKE_CASE_ : Any = 128 SCREAMING_SNAKE_CASE_ : List[str] = datasets.load_dataset('cnn_dailymail' ,'3.0.0' ,split='train[:1%]' ) SCREAMING_SNAKE_CASE_ : Dict = datasets.load_dataset('cnn_dailymail' ,'3.0.0' ,split='validation[:1%]' ) SCREAMING_SNAKE_CASE_ : List[Any] = train_dataset.select(range(32 ) ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = val_dataset.select(range(16 ) ) SCREAMING_SNAKE_CASE_ : str = 4 def _map_to_encoder_decoder_inputs(snake_case__ ): # Tokenizer will automatically set [BOS] <text> [EOS] SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer(batch['article'] ,padding='max_length' ,truncation=snake_case__ ,max_length=512 ) SCREAMING_SNAKE_CASE_ : int = tokenizer(batch['highlights'] ,padding='max_length' ,truncation=snake_case__ ,max_length=128 ) SCREAMING_SNAKE_CASE_ : Any = inputs.input_ids SCREAMING_SNAKE_CASE_ : Optional[int] = inputs.attention_mask SCREAMING_SNAKE_CASE_ : List[str] = outputs.input_ids SCREAMING_SNAKE_CASE_ : int = outputs.input_ids.copy() SCREAMING_SNAKE_CASE_ : Optional[Any] = [ [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch['labels'] ] SCREAMING_SNAKE_CASE_ : int = outputs.attention_mask assert all(len(snake_case__ ) 
== 512 for x in inputs.input_ids ) assert all(len(snake_case__ ) == 128 for x in outputs.input_ids ) return batch def _compute_metrics(snake_case__ ): SCREAMING_SNAKE_CASE_ : Optional[Any] = pred.label_ids SCREAMING_SNAKE_CASE_ : Any = pred.predictions # all unnecessary tokens are removed SCREAMING_SNAKE_CASE_ : Any = tokenizer.batch_decode(snake_case__ ,skip_special_tokens=snake_case__ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.batch_decode(snake_case__ ,skip_special_tokens=snake_case__ ) SCREAMING_SNAKE_CASE_ : List[Any] = sum([int(pred_str[i] == label_str[i] ) for i in range(len(snake_case__ ) )] ) / len(snake_case__ ) return {"accuracy": accuracy} # map train dataset SCREAMING_SNAKE_CASE_ : Optional[int] = train_dataset.map( _map_to_encoder_decoder_inputs ,batched=snake_case__ ,batch_size=snake_case__ ,remove_columns=['article', 'highlights'] ,) train_dataset.set_format( type='torch' ,columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] ,) # same for validation dataset SCREAMING_SNAKE_CASE_ : Dict = val_dataset.map( _map_to_encoder_decoder_inputs ,batched=snake_case__ ,batch_size=snake_case__ ,remove_columns=['article', 'highlights'] ,) val_dataset.set_format( type='torch' ,columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'] ,) SCREAMING_SNAKE_CASE_ : List[Any] = self.get_auto_remove_tmp_dir() SCREAMING_SNAKE_CASE_ : List[str] = SeqaSeqTrainingArguments( output_dir=snake_case__ ,per_device_train_batch_size=snake_case__ ,per_device_eval_batch_size=snake_case__ ,predict_with_generate=snake_case__ ,evaluation_strategy='steps' ,do_train=snake_case__ ,do_eval=snake_case__ ,warmup_steps=0 ,eval_steps=2 ,logging_steps=2 ,) # instantiate trainer SCREAMING_SNAKE_CASE_ : str = SeqaSeqTrainer( model=snake_case__ ,args=snake_case__ ,compute_metrics=_compute_metrics ,train_dataset=snake_case__ ,eval_dataset=snake_case__ ,tokenizer=snake_case__ ,) # start training trainer.train()
105
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a = { """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoForCausalLM""", """GPTNeoForQuestionAnswering""", """GPTNeoForSequenceClassification""", """GPTNeoForTokenClassification""", """GPTNeoModel""", """GPTNeoPreTrainedModel""", """load_tf_weights_in_gpt_neo""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ """FlaxGPTNeoForCausalLM""", """FlaxGPTNeoModel""", """FlaxGPTNeoPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
687
0
from __future__ import annotations from typing import Dict from ...configuration_utils import PretrainedConfig __snake_case :Tuple ={ 'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json', 'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json', } class lowerCAmelCase__ ( _lowerCamelCase ): A_ : Optional[int] = 'ernie_m' A_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"} def __init__( self : Tuple , __UpperCamelCase : int = 250_002 , __UpperCamelCase : int = 768 , __UpperCamelCase : int = 12 , __UpperCamelCase : int = 12 , __UpperCamelCase : int = 3_072 , __UpperCamelCase : str = "gelu" , __UpperCamelCase : float = 0.1 , __UpperCamelCase : float = 0.1 , __UpperCamelCase : int = 514 , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 1e-05 , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : int=0.0 , **__UpperCamelCase : Optional[int] , ) -> Union[str, Any]: super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase ) A = vocab_size A = hidden_size A = num_hidden_layers A = num_attention_heads A = intermediate_size A = hidden_act A = hidden_dropout_prob A = attention_probs_dropout_prob A = max_position_embeddings A = initializer_range A = layer_norm_eps A = classifier_dropout A = is_decoder A = act_dropout
106
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ): """simple docstring""" _lowerCAmelCase :Optional[Any] = a while True: _lowerCAmelCase :str = Decimal(__magic_name__ ) - ( Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__magic_name__ ) ) < precision: # noqa: S307 return float(__magic_name__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''') # Find Square Root of 5 print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''') # Exponential Roots print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
687
0
'''simple docstring''' import os from datetime import datetime as dt from github import Github _UpperCAmelCase : List[Any] = [ '''good first issue''', '''feature request''', '''wip''', ] def _SCREAMING_SNAKE_CASE ( ): _A = Github(os.environ['GITHUB_TOKEN'] ) _A = g.get_repo('huggingface/accelerate' ) _A = repo.get_issues(state='open' ) for issue in open_issues: _A = sorted([comment for comment in issue.get_comments()] , key=lambda __snake_case : i.created_at , reverse=__snake_case ) _A = comments[0] if len(__snake_case ) > 0 else None _A = dt.utcnow() _A = (current_time - issue.updated_at).days _A = (current_time - issue.created_at).days if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and days_since_updated > 7 and days_since_creation >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Close issue since it has been 7 days of inactivity since bot mention. issue.edit(state='closed' ) elif ( days_since_updated > 2_3 and days_since_creation >= 3_0 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Add stale comment issue.create_comment( 'This issue has been automatically marked as stale because it has not had ' 'recent activity. If you think this still needs to be addressed ' 'please comment on this thread.\n\nPlease note that issues that do not follow the ' '[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) ' 'are likely to be ignored.' ) if __name__ == "__main__": main()
107
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a = { """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_000, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a = { """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_000, """block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a = { """sample_size""": 256, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a = { """num_train_timesteps""": 40, """sigma_min""": 0.0_0_2, 
"""sigma_max""": 8_0.0, } a = { """num_train_timesteps""": 201, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } a = { """num_train_timesteps""": 151, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } def UpperCamelCase_( __magic_name__ : Dict ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ): """simple docstring""" _lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] _lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] _lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] _lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] _lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] _lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] _lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] _lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] _lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: _lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""] _lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 
, dim=0 ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) _lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""] _lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""] _lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :int = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) _lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ): """simple docstring""" _lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' ) _lowerCAmelCase :List[Any] = {} _lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight'] _lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias'] _lowerCAmelCase :Dict = checkpoint['time_embed.2.weight'] _lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight'] _lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight'] _lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias'] _lowerCAmelCase :List[Any] = unet_config['down_block_types'] _lowerCAmelCase :Any = unet_config['layers_per_block'] _lowerCAmelCase :List[Any] = unet_config['attention_head_dim'] _lowerCAmelCase :Tuple = unet_config['block_out_channels'] _lowerCAmelCase :List[str] = 1 _lowerCAmelCase :Optional[int] = channels_list[0] for i, layer_type in enumerate(__magic_name__ ): _lowerCAmelCase :Tuple = channels_list[i] _lowerCAmelCase :Optional[Any] = 
current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__magic_name__ ): _lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}""" _lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0""" _lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False _lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__magic_name__ ): _lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}""" _lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0""" _lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False _lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) _lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}""" _lowerCAmelCase :str = f"""input_blocks.{current_layer}.1""" _lowerCAmelCase :Optional[Any] = convert_attention( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) current_layer += 1 if i != len(__magic_name__ ) - 1: _lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0""" _lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0""" _lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) current_layer += 1 _lowerCAmelCase :Dict = current_channels # hardcoded the mid-block for now _lowerCAmelCase :int = 'mid_block.resnets.0' _lowerCAmelCase :Optional[Any] = 'middle_block.0' _lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _lowerCAmelCase :Optional[int] = 'mid_block.attentions.0' _lowerCAmelCase :Optional[int] = 'middle_block.1' _lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , 
__magic_name__ ) _lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1' _lowerCAmelCase :Optional[int] = 'middle_block.2' _lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _lowerCAmelCase :Tuple = 0 _lowerCAmelCase :str = unet_config['up_block_types'] for i, layer_type in enumerate(__magic_name__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}""" _lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0""" _lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) current_layer += 1 if i != len(__magic_name__ ) - 1: _lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0""" _lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1""" _lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}""" _lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0""" _lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) _lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}""" _lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1""" _lowerCAmelCase :int = convert_attention( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) current_layer += 1 if i != len(__magic_name__ ) - 1: _lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0""" _lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2""" _lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _lowerCAmelCase :str = checkpoint['out.0.weight'] _lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias'] _lowerCAmelCase :List[Any] = 
checkpoint['out.2.weight'] _lowerCAmelCase :Dict = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") a = parser.parse_args() a = strabool(args.class_cond) a = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: a = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: a = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: a = None a = con_pt_to_diffuser(args.unet_path, unet_config) a = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: a = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: a = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') a = CMStochasticIterativeScheduler(**scheduler_config) a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
687
0
from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case ) -> List[str]: for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})""" def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , __snake_case , __snake_case , __snake_case=True ) -> Optional[int]: model.train() _UpperCAmelCase = model(__snake_case ) _UpperCAmelCase = F.mse_loss(__snake_case , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(__snake_case ) def _SCREAMING_SNAKE_CASE ( __snake_case , __snake_case=False ) -> Optional[int]: set_seed(4_2 ) _UpperCAmelCase = RegressionModel() _UpperCAmelCase = deepcopy(__snake_case ) _UpperCAmelCase = RegressionDataset(length=8_0 ) _UpperCAmelCase = DataLoader(__snake_case , batch_size=1_6 ) model.to(accelerator.device ) if sched: _UpperCAmelCase = AdamW(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _UpperCAmelCase = LambdaLR(__snake_case , 
lr_lambda=lambda __snake_case : epoch**0.65 ) _UpperCAmelCase = LambdaLR(__snake_case , lr_lambda=lambda __snake_case : epoch**0.65 ) # Make a copy of `model` if sched: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(__snake_case , __snake_case , __snake_case , __snake_case ) else: _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(__snake_case , __snake_case ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Dict: # Test when on a single CPU or GPU that the context manager does nothing _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_training_setup(__snake_case ) # Use a single batch _UpperCAmelCase , _UpperCAmelCase = next(iter(__snake_case ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__snake_case , __snake_case , __snake_case , __snake_case ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__snake_case ): step_model(__snake_case , __snake_case , __snake_case , __snake_case ) else: # Sync grads step_model(__snake_case , __snake_case , __snake_case , __snake_case ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration 
torch.manual_seed(1_3_3_7 + iteration ) _UpperCAmelCase = ddp_input[torch.randperm(len(__snake_case ) )] def _SCREAMING_SNAKE_CASE ( __snake_case ) -> int: # Test on distributed setup that context manager behaves properly _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_training_setup(__snake_case ) # Use a single batch _UpperCAmelCase , _UpperCAmelCase = next(iter(__snake_case ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__snake_case , __snake_case , __snake_case , __snake_case ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(__snake_case ): step_model(__snake_case , __snake_case , __snake_case , __snake_case ) else: # Sync grads step_model(__snake_case , __snake_case , __snake_case , __snake_case ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) _UpperCAmelCase = ddp_input[torch.randperm(len(__snake_case ) )] def _SCREAMING_SNAKE_CASE ( __snake_case=False , __snake_case=False ) -> Any: _UpperCAmelCase = Accelerator( split_batches=__snake_case , 
dispatch_batches=__snake_case , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_training_setup(__snake_case ) for iteration, batch in enumerate(__snake_case ): _UpperCAmelCase , _UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) # Do "gradient accumulation" (noop) with accelerator.accumulate(__snake_case ): step_model(__snake_case , __snake_case , __snake_case , __snake_case ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(__snake_case ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), f"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})""" else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), f"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})""" # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) _UpperCAmelCase = ddp_input[torch.randperm(len(__snake_case ) )] GradientState._reset_state() def _SCREAMING_SNAKE_CASE ( __snake_case=False , __snake_case=False ) -> Dict: _UpperCAmelCase = Accelerator( split_batches=__snake_case , dispatch_batches=__snake_case , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _UpperCAmelCase , _UpperCAmelCase 
, _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = get_training_setup(__snake_case , __snake_case ) for iteration, batch in enumerate(__snake_case ): _UpperCAmelCase , _UpperCAmelCase = batch.values() # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__snake_case )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(__snake_case ): step_model(__snake_case , __snake_case , __snake_case , __snake_case ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), f"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n""" _UpperCAmelCase = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__snake_case )) if accelerator.num_processes > 1: check_model_parameters(__snake_case , __snake_case , __snake_case , __snake_case ) # Shuffle ddp_input on each iteration torch.manual_seed(1_3_3_7 + iteration ) GradientState._reset_state() def _SCREAMING_SNAKE_CASE ( ) -> Union[str, Any]: _UpperCAmelCase = Accelerator() _UpperCAmelCase = RegressionDataset(length=8_0 ) _UpperCAmelCase = DataLoader(__snake_case , batch_size=1_6 ) _UpperCAmelCase = RegressionDataset(length=9_6 ) _UpperCAmelCase = DataLoader(__snake_case , batch_size=1_6 ) _UpperCAmelCase , _UpperCAmelCase = accelerator.prepare(__snake_case , 
__snake_case ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(__snake_case ): assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case ) if iteration < len(__snake_case ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(__snake_case ): assert id(accelerator.gradient_state.active_dataloader ) == id(__snake_case ) if batch_num < len(__snake_case ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def _SCREAMING_SNAKE_CASE ( ) -> List[Any]: _UpperCAmelCase = Accelerator() _UpperCAmelCase = accelerator.state if state.local_process_index == 0: print("""**Test `accumulate` gradient accumulation with dataloader break**""" ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print("""**Test NOOP `no_sync` context manager**""" ) test_noop_sync(__snake_case ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print("""**Test Distributed `no_sync` context manager**""" ) test_distributed_sync(__snake_case ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation(__snake_case , __snake_case ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , 
"""`split_batches=False`, `dispatch_batches=False`**""" , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( """**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , ) test_gradient_accumulation_with_opt_and_scheduler(__snake_case , __snake_case ) def _SCREAMING_SNAKE_CASE ( __snake_case ) -> Tuple: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
108
import os import re import shutil import sys import tempfile import unittest import black a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. a = """ \"\"\" Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \"\"\" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None """ class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Optional[Any] = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) _lowerCAmelCase :Tuple = self.diffusers_dir shutil.copy( os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :str = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ): _lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _lowerCAmelCase :Dict = comment + f"""\nclass 
{class_name}(nn.Module):\n""" + overwrite_result _lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(_UpperCAmelCase , 'w' , newline='\n' ) as f: f.write(_UpperCAmelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase ) with open(_UpperCAmelCase , 'r' ) as f: self.assertTrue(f.read() , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): # Base copy consistency self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) # Copy consistency with a really long name _lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from 
diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
687
0
'''simple docstring''' from collections import OrderedDict from typing import TYPE_CHECKING, Any, List, Mapping, Optional from packaging import version if TYPE_CHECKING: from ... import PreTrainedTokenizer, TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import is_torch_available, logging a = logging.get_logger(__name__) a = { "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json", "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json", "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json", "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json", "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json", "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json", } class __a ( _snake_case ): __UpperCamelCase : List[str] = 'bloom' __UpperCamelCase : Optional[Any] = ['past_key_values'] __UpperCamelCase : Optional[int] = { 'num_hidden_layers': 'n_layer', 'num_attention_heads': 'n_head', } def __init__( self : Union[str, Any] ,lowerCamelCase : str=25_0880 ,lowerCamelCase : List[Any]=64 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : Tuple=8 ,lowerCamelCase : Union[str, Any]=1E-5 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : str=True ,lowerCamelCase : List[Any]=1 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Optional[int]=0.0 ,lowerCamelCase : List[Any]=0.0 ,lowerCamelCase : Optional[Any]=1 ,lowerCamelCase : Tuple=False ,**lowerCamelCase : str ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = vocab_size # Backward compatibility with n_embed kwarg __SCREAMING_SNAKE_CASE = kwargs.pop("""n_embed""" ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = hidden_size if n_embed is None else n_embed __SCREAMING_SNAKE_CASE = n_layer 
__SCREAMING_SNAKE_CASE = n_head __SCREAMING_SNAKE_CASE = layer_norm_epsilon __SCREAMING_SNAKE_CASE = initializer_range __SCREAMING_SNAKE_CASE = use_cache __SCREAMING_SNAKE_CASE = pretraining_tp __SCREAMING_SNAKE_CASE = apply_residual_connection_post_layernorm __SCREAMING_SNAKE_CASE = hidden_dropout __SCREAMING_SNAKE_CASE = attention_dropout __SCREAMING_SNAKE_CASE = bos_token_id __SCREAMING_SNAKE_CASE = eos_token_id __SCREAMING_SNAKE_CASE = slow_but_exact super().__init__(bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase ) class __a ( _snake_case ): __UpperCamelCase : int = version.parse('1.12' ) def __init__( self : Optional[int] ,lowerCamelCase : PretrainedConfig ,lowerCamelCase : str = "default" ,lowerCamelCase : List[PatchingSpec] = None ,lowerCamelCase : bool = False ,): '''simple docstring''' super().__init__(lowerCamelCase ,task=lowerCamelCase ,patching_specs=lowerCamelCase ,use_past=lowerCamelCase ) if not getattr(self._config ,"""pad_token_id""" ,lowerCamelCase ): # TODO: how to do that better? __SCREAMING_SNAKE_CASE = 0 @property def UpperCAmelCase__ ( self : int ): '''simple docstring''' __SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: # BLOOM stores values on dynamic axis 2. 
For more details see: https://github.com/huggingface/transformers/pull/18344 self.fill_with_past_key_values_(lowerCamelCase ,direction="""inputs""" ,inverted_values_shape=lowerCamelCase ) __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_sequence + sequence"""} else: __SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""} return common_inputs @property def UpperCAmelCase__ ( self : Tuple ): '''simple docstring''' return self._config.n_layer @property def UpperCAmelCase__ ( self : Dict ): '''simple docstring''' return self._config.n_head @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return 1E-3 def UpperCAmelCase__ ( self : str ,lowerCamelCase : "PreTrainedTokenizer" ,lowerCamelCase : int = -1 ,lowerCamelCase : int = -1 ,lowerCamelCase : bool = False ,lowerCamelCase : Optional["TensorType"] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = super(lowerCamelCase ,self ).generate_dummy_inputs( lowerCamelCase ,batch_size=lowerCamelCase ,seq_length=lowerCamelCase ,is_pair=lowerCamelCase ,framework=lowerCamelCase ) # We need to order the input in the way they appears in the forward() __SCREAMING_SNAKE_CASE = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __SCREAMING_SNAKE_CASE = seqlen + 2 __SCREAMING_SNAKE_CASE = self._config.hidden_size // self.num_attention_heads __SCREAMING_SNAKE_CASE = ( batch * self.num_attention_heads, head_dim, past_key_values_length, ) __SCREAMING_SNAKE_CASE = ( batch * self.num_attention_heads, past_key_values_length, head_dim, ) __SCREAMING_SNAKE_CASE = [ (torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers ) ] __SCREAMING_SNAKE_CASE = 
common_inputs["""attention_mask"""] if self.use_past: __SCREAMING_SNAKE_CASE = ordered_inputs["""attention_mask"""].dtype __SCREAMING_SNAKE_CASE = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase ,lowerCamelCase ,dtype=lowerCamelCase )] ,dim=1 ) return ordered_inputs @property def UpperCAmelCase__ ( self : str ): '''simple docstring''' return 13
109
from dataclasses import dataclass, field from typing import Optional @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} ) lowerCamelCase : Optional[str] = field( default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} ) lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} ) lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} ) lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} ) lowerCamelCase : Optional[int] = field( default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} ) lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} ) lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} ) lowerCamelCase : Optional[int] = field( default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} ) lowerCamelCase : Optional[int] = field( default=16 , metadata={'help': 'Number of gradient accumulation steps.'} ) lowerCamelCase : Optional[bool] = field( default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} ) lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} ) lowerCamelCase : Optional[int] = field( default=-1 , metadata={'help': 'Maximum number of evaluation steps. 
If -1 the full dataset is evaluated.'} ) lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} ) lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} ) lowerCamelCase : Optional[int] = field( default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , ) lowerCamelCase : Optional[str] = field( default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} ) lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} ) lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} ) lowerCamelCase : Optional[int] = field( default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} ) lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} ) lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} ) lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} ) lowerCamelCase : Optional[int] = field( default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. 
If not included all tasks are evaluated.'} , ) lowerCamelCase : Optional[bool] = field( default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} ) lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} ) lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} ) lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} ) lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} ) lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} ) lowerCamelCase : Optional[int] = field( default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} ) lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} ) lowerCamelCase : Optional[str] = field( default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} ) lowerCamelCase : Optional[str] = field( default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} ) lowerCamelCase : Optional[int] = field( default=-1 , metadata={ 'help': ( 'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive' ' number corresponds to which GPU device id to run on.' ) } , ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[int] = field( default=snake_case__ , metadata={ 'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.' 
} , ) lowerCamelCase : Optional[str] = field( default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} ) lowerCamelCase : Optional[int] = field( default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} ) lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} ) lowerCamelCase : Optional[float] = field( default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} ) lowerCamelCase : Optional[float] = field( default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} ) lowerCamelCase : Optional[float] = field( default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} ) lowerCamelCase : Optional[float] = field( default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} ) lowerCamelCase : Optional[float] = field( default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , ) lowerCamelCase : Optional[bool] = field( default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} ) lowerCamelCase : Optional[float] = field( default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} ) lowerCamelCase : Optional[str] = field( default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} ) lowerCamelCase : Optional[str] 
= field(default='content' , metadata={'help': 'Column containing text data to process.'} ) lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} ) lowerCamelCase : Optional[int] = field( default=3_27_68 , metadata={'help': 'Number of examples to train the tokenizer on.'} ) lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} ) lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} ) lowerCamelCase : Optional[str] = field( default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} ) lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} ) @dataclass class UpperCAmelCase_ : """simple docstring""" lowerCamelCase : Optional[str] = field( default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} ) lowerCamelCase : Optional[str] = field( default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} ) lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} ) lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
687
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''} class _snake_case ( snake_case__ ): """simple docstring""" a = 'openai-gpt' a = { 'max_position_embeddings': 'n_positions', 'hidden_size': 'n_embd', 'num_attention_heads': 'n_head', 'num_hidden_layers': 'n_layer', } def __init__( self : List[Any] , _A : Optional[Any]=4_0_4_7_8 , _A : Any=5_1_2 , _A : Optional[int]=7_6_8 , _A : Dict=1_2 , _A : Union[str, Any]=1_2 , _A : int="gelu" , _A : Dict=0.1 , _A : str=0.1 , _A : List[Any]=0.1 , _A : Union[str, Any]=1e-5 , _A : Tuple=0.02 , _A : int="cls_index" , _A : str=True , _A : str=None , _A : str=True , _A : Dict=0.1 , **_A : Tuple , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = vocab_size _SCREAMING_SNAKE_CASE : Dict = n_positions _SCREAMING_SNAKE_CASE : List[Any] = n_embd _SCREAMING_SNAKE_CASE : Tuple = n_layer _SCREAMING_SNAKE_CASE : Optional[Any] = n_head _SCREAMING_SNAKE_CASE : Optional[Any] = afn _SCREAMING_SNAKE_CASE : List[Any] = resid_pdrop _SCREAMING_SNAKE_CASE : List[Any] = embd_pdrop _SCREAMING_SNAKE_CASE : Dict = attn_pdrop _SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_epsilon _SCREAMING_SNAKE_CASE : Dict = initializer_range _SCREAMING_SNAKE_CASE : int = summary_type _SCREAMING_SNAKE_CASE : Dict = summary_use_proj _SCREAMING_SNAKE_CASE : List[str] = summary_activation _SCREAMING_SNAKE_CASE : Optional[int] = summary_first_dropout _SCREAMING_SNAKE_CASE : List[str] = summary_proj_to_labels super().__init__(**_UpperCAmelCase)
338
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :List[str] = 'ylacombe/bark-small' _lowerCAmelCase :int = tempfile.mkdtemp() _lowerCAmelCase :List[str] = 'en_speaker_1' _lowerCAmelCase :Union[str, Any] = 'This is a test string' _lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json' _lowerCAmelCase :str = 'speaker_embeddings' def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ): return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase :List[Any] = self.get_tokenizer() _lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :List[str] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowerCAmelCase :Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): 
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _lowerCAmelCase :List[Any] = 35 _lowerCAmelCase :Optional[int] = 2 _lowerCAmelCase :Dict = 8 _lowerCAmelCase :Dict = { 'semantic_prompt': np.ones(_UpperCAmelCase ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase ) _lowerCAmelCase :List[Any] = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase ) _lowerCAmelCase :Optional[int] = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Tuple = self.get_tokenizer() _lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase ) _lowerCAmelCase :List[Any] = processor(text=self.input_string ) _lowerCAmelCase :List[str] = tokenizer( self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
687
0
def __UpperCamelCase ( lowercase__ : int , lowercase__ : int ) -> str: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) lowerCAmelCase_ : int = str(bin(lowercase__ ) ) binary_number += "0" * shift_amount return binary_number def __UpperCamelCase ( lowercase__ : int , lowercase__ : int ) -> Any: '''simple docstring''' if number < 0 or shift_amount < 0: raise ValueError("""both inputs must be positive integers""" ) lowerCAmelCase_ : Union[str, Any] = str(bin(lowercase__ ) )[2:] if shift_amount >= len(lowercase__ ): return "0b0" lowerCAmelCase_ : str = binary_number[: len(lowercase__ ) - shift_amount] return "0b" + shifted_binary_number def __UpperCamelCase ( lowercase__ : int , lowercase__ : int ) -> Optional[Any]: '''simple docstring''' if number >= 0: # Get binary representation of positive number lowerCAmelCase_ : str = '0' + str(bin(lowercase__ ) ).strip("""-""" )[2:] else: # Get binary (2's complement) representation of negative number lowerCAmelCase_ : Dict = len(bin(lowercase__ )[3:] ) # Find 2's complement of number lowerCAmelCase_ : Union[str, Any] = bin(abs(lowercase__ ) - (1 << binary_number_length) )[3:] lowerCAmelCase_ : List[Any] = ( '1' + '0' * (binary_number_length - len(lowercase__ )) + binary_number ) if shift_amount >= len(lowercase__ ): return "0b" + binary_number[0] * len(lowercase__ ) return ( "0b" + binary_number[0] * shift_amount + binary_number[: len(lowercase__ ) - shift_amount] ) if __name__ == "__main__": import doctest doctest.testmod()
600
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", """bert-base-german-dbmdz-uncased""": 
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : int = 'bert' def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :List[Any] = vocab_size _lowerCAmelCase :Tuple = 
hidden_size _lowerCAmelCase :Dict = num_hidden_layers _lowerCAmelCase :Optional[Any] = num_attention_heads _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :int = intermediate_size _lowerCAmelCase :Tuple = hidden_dropout_prob _lowerCAmelCase :Tuple = attention_probs_dropout_prob _lowerCAmelCase :List[Any] = max_position_embeddings _lowerCAmelCase :Dict = type_vocab_size _lowerCAmelCase :Any = initializer_range _lowerCAmelCase :int = layer_norm_eps _lowerCAmelCase :List[Any] = position_embedding_type _lowerCAmelCase :int = use_cache _lowerCAmelCase :Union[str, Any] = classifier_dropout class UpperCAmelCase_ (snake_case__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): if self.task == "multiple-choice": _lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
687
0
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase =logging.get_logger(__name__) class __magic_name__ ( snake_case__ ): UpperCAmelCase ='encoder-decoder' UpperCAmelCase =True def __init__( self , **snake_case) -> str: '''simple docstring''' super().__init__(**_UpperCAmelCase) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _UpperCAmelCase : Optional[Any] =kwargs.pop('encoder') _UpperCAmelCase : Dict =encoder_config.pop('model_type') _UpperCAmelCase : str =kwargs.pop('decoder') _UpperCAmelCase : str =decoder_config.pop('model_type') from ..auto.configuration_auto import AutoConfig _UpperCAmelCase : str =AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase) _UpperCAmelCase : Tuple =AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase) _UpperCAmelCase : Any =True @classmethod def lowerCAmelCase ( cls , snake_case , snake_case , **snake_case) -> Optional[Any]: '''simple docstring''' logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config') _UpperCAmelCase : Dict =True _UpperCAmelCase : List[str] =True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase) def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Union[str, Any] =copy.deepcopy(self.__dict__) _UpperCAmelCase : Optional[int] =self.encoder.to_dict() _UpperCAmelCase : Union[str, Any] =self.decoder.to_dict() _UpperCAmelCase : List[str] =self.__class__.model_type return output
446
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ): """simple docstring""" if isinstance(__magic_name__ , torch.Tensor ): return image elif isinstance(__magic_name__ , PIL.Image.Image ): _lowerCAmelCase :Tuple = [image] if isinstance(image[0] , PIL.Image.Image ): _lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] _lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 ) _lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0 _lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 ) _lowerCAmelCase :int = 2.0 * image - 1.0 _lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ ) elif isinstance(image[0] , torch.Tensor ): _lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 ) return image def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ): """simple docstring""" if not isinstance(__magic_name__ , np.ndarray ): _lowerCAmelCase :Tuple = True _lowerCAmelCase :str = va.device _lowerCAmelCase :List[str] = va.cpu().numpy() _lowerCAmelCase :List[str] = va.cpu().numpy() _lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) ) if np.abs(__magic_name__ ) > DOT_THRESHOLD: 
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va else: _lowerCAmelCase :int = np.arccos(__magic_name__ ) _lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ ) _lowerCAmelCase :Union[str, Any] = theta_a * t _lowerCAmelCase :str = np.sin(__magic_name__ ) _lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a _lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a _lowerCAmelCase :List[Any] = sa * va + sa * va if inputs_are_torch: _lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ ) return va def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ): """simple docstring""" _lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 ) _lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" for param in model.parameters(): _lowerCAmelCase :List[str] = value class UpperCAmelCase_ (snake_case__ ): """simple docstring""" def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ): super().__init__() self.register_modules( vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , ) _lowerCAmelCase :int = ( feature_extractor.size if isinstance(feature_extractor.size , _UpperCAmelCase ) else 
feature_extractor.size['shortest_edge'] ) _lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , _UpperCAmelCase ) set_requires_grad(self.clip_model , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): self.enable_attention_slicing(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any ): set_requires_grad(self.vae , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): set_requires_grad(self.vae , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any ): set_requires_grad(self.unet , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): set_requires_grad(self.unet , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ): # get the original timestep using init_timestep _lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase ) _lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 ) _lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ): if not isinstance(_UpperCAmelCase , torch.Tensor ): raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" ) _lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , 
dtype=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _lowerCAmelCase :List[Any] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase ) ] _lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 ) else: _lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents _lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 ) _lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase ) # get latents _lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :List[str] = init_latents return latents def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ): _lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ): _lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() _lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase ) _lowerCAmelCase :Dict = 
image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ): _lowerCAmelCase :Dict = latents.detach().requires_grad_() _lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep] _lowerCAmelCase :Optional[int] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase ) _lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , _UpperCAmelCase ): _lowerCAmelCase :Dict = self.scheduler.sigmas[index] _lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred else: raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample _lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample _lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase ) _lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype ) _lowerCAmelCase :List[Any] = 
self.clip_model.get_image_features(_UpperCAmelCase ) _lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase ) _lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale _lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0] if isinstance(self.scheduler , _UpperCAmelCase ): _lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2) _lowerCAmelCase :Dict = noise_pred_original else: _lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1: _lowerCAmelCase :int = [generator] + [None] * (batch_size - 1) _lowerCAmelCase :List[Any] = [ ('model', self.coca_model is None), ('tokenizer', 
self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] _lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]] _lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(_UpperCAmelCase ): raise ValueError( f"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase ) if style_prompt is None: if len(_UpperCAmelCase ): raise ValueError( f"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase ) # get prompt text embeddings for content and style _lowerCAmelCase :Any = self.tokenizer( _UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , ) _lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _lowerCAmelCase :int = self.tokenizer( _UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , ) _lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 ) # set timesteps _lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _lowerCAmelCase :Dict = {} if accepts_offset: _lowerCAmelCase :Optional[int] = 1 self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more 
optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device ) _lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase ) # Preprocess image _lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :int = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase ) _lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase ) _lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip_guidance_scale > 0: _lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Any = slerp( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
_lowerCAmelCase :int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1] _lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' ) _lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _lowerCAmelCase :Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to( self.device ) else: _lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _lowerCAmelCase :int = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _lowerCAmelCase :Any = {} if accepts_eta: _lowerCAmelCase :Any = eta # check if the scheduler accepts generator _lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _lowerCAmelCase :List[Any] = generator with self.progress_bar(total=_UpperCAmelCase ): for i, t in enumerate(_UpperCAmelCase ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample # perform classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 ) _lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _lowerCAmelCase :List[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents _lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample _lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 
1 ) _lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
687
0
import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class __lowercase (snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = AutoencoderKL _UpperCAmelCase = 'sample' _UpperCAmelCase = 1E-2 @property def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = 4 SCREAMING_SNAKE_CASE_ : int = 3 SCREAMING_SNAKE_CASE_ : List[Any] = (3_2, 3_2) SCREAMING_SNAKE_CASE_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(_UpperCAmelCase ) return {"sample": image} @property def UpperCamelCase__ ( self ): """simple docstring""" return (3, 3_2, 3_2) @property def UpperCamelCase__ ( self ): """simple docstring""" return (3, 3_2, 3_2) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = { 'block_out_channels': [3_2, 6_4], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } SCREAMING_SNAKE_CASE_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : str = self.prepare_init_args_and_inputs_for_common() SCREAMING_SNAKE_CASE_ : Any = self.model_class(**_UpperCAmelCase ) model.to(_UpperCAmelCase ) assert not model.is_gradient_checkpointing and 
model.training SCREAMING_SNAKE_CASE_ : str = model(**_UpperCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() SCREAMING_SNAKE_CASE_ : Optional[int] = torch.randn_like(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing SCREAMING_SNAKE_CASE_ : List[str] = self.model_class(**_UpperCAmelCase ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(_UpperCAmelCase ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training SCREAMING_SNAKE_CASE_ : int = model_a(**_UpperCAmelCase ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() SCREAMING_SNAKE_CASE_ : str = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) SCREAMING_SNAKE_CASE_ : str = dict(model.named_parameters() ) SCREAMING_SNAKE_CASE_ : Optional[Any] = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) self.assertEqual(len(loading_info['missing_keys'] ) , 0 ) model.to(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : List[str] = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' ) SCREAMING_SNAKE_CASE_ : Any = 
model.to(_UpperCAmelCase ) model.eval() if torch_device == "mps": SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.manual_seed(0 ) else: SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 ) SCREAMING_SNAKE_CASE_ : Tuple = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) SCREAMING_SNAKE_CASE_ : str = image.to(_UpperCAmelCase ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : str = model(_UpperCAmelCase , sample_posterior=_UpperCAmelCase , generator=_UpperCAmelCase ).sample SCREAMING_SNAKE_CASE_ : Any = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor( [ -4.0_078E-01, -3.8_323E-04, -1.2_681E-01, -1.1_462E-01, 2.0_095E-01, 1.0_893E-01, -8.8_247E-02, -3.0_361E-01, -9.8_644E-03, ] ) elif torch_device == "cpu": SCREAMING_SNAKE_CASE_ : Dict = torch.tensor( [-0.1_352, 0.0_878, 0.0_419, -0.0_818, -0.1_069, 0.0_688, -0.1_458, -0.4_446, -0.0_026] ) else: SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor( [-0.2_421, 0.4_642, 0.2_507, -0.0_438, 0.0_682, 0.3_160, -0.2_018, -0.0_727, 0.2_485] ) self.assertTrue(torch_all_close(_UpperCAmelCase , _UpperCAmelCase , rtol=1E-2 ) ) @slow class __lowercase (unittest.TestCase ): """simple docstring""" def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" return F'''gaussian_noise_s={seed}_shape={'_'.join([str(_UpperCAmelCase ) for s in shape] )}.npy''' def UpperCamelCase__ ( self ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase__ ( self , lowerCAmelCase__=0 , lowerCAmelCase__=(4, 3, 5_1_2, 5_1_2) , lowerCAmelCase__=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = torch.floataa if fpaa else 
torch.floataa SCREAMING_SNAKE_CASE_ : str = torch.from_numpy(load_hf_numpy(self.get_file_format(_UpperCAmelCase , _UpperCAmelCase ) ) ).to(_UpperCAmelCase ).to(_UpperCAmelCase ) return image def UpperCamelCase__ ( self , lowerCAmelCase__="CompVis/stable-diffusion-v1-4" , lowerCAmelCase__=False ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 'fp16' if fpaa else None SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.floataa if fpaa else torch.floataa SCREAMING_SNAKE_CASE_ : Optional[int] = AutoencoderKL.from_pretrained( _UpperCAmelCase , subfolder='vae' , torch_dtype=_UpperCAmelCase , revision=_UpperCAmelCase , ) model.to(_UpperCAmelCase ).eval() return model def UpperCamelCase__ ( self , lowerCAmelCase__=0 ): """simple docstring""" if torch_device == "mps": return torch.manual_seed(_UpperCAmelCase ) return torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_603, 0.9_878, -0.0_495, -0.0_790, -0.2_709, 0.8_375, -0.2_060, -0.0_824], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_376, 0.1_168, 0.1_332, -0.4_840, -0.2_508, -0.0_791, -0.0_493, -0.4_089], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_sd_image(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Any = self.get_generator(_UpperCAmelCase ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : str = model(_UpperCAmelCase , generator=_UpperCAmelCase , sample_posterior=_UpperCAmelCase ).sample assert sample.shape == image.shape SCREAMING_SNAKE_CASE_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu() SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice ) assert 
torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [3_3, [-0.0_513, 0.0_289, 1.3_799, 0.2_166, -0.2_573, -0.0_871, 0.5_103, -0.0_999]], [4_7, [-0.4_128, -0.1_320, -0.3_704, 0.1_965, -0.4_116, -0.2_332, -0.3_340, 0.2_247]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_sd_vae_model(fpaa=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_sd_image(_UpperCAmelCase , fpaa=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_generator(_UpperCAmelCase ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[int] = model(_UpperCAmelCase , generator=_UpperCAmelCase , sample_posterior=_UpperCAmelCase ).sample assert sample.shape == image.shape SCREAMING_SNAKE_CASE_ : Union[str, Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu() SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.1_609, 0.9_866, -0.0_487, -0.0_777, -0.2_716, 0.8_368, -0.2_055, -0.0_814], [-0.2_395, 0.0_098, 0.0_102, -0.0_709, -0.2_840, -0.0_274, -0.0_718, -0.1_824]], [4_7, [-0.2_377, 0.1_147, 0.1_333, -0.4_841, -0.2_506, -0.0_805, -0.0_491, -0.4_085], [0.0_350, 0.0_847, 0.0_467, 0.0_344, -0.0_842, -0.0_547, -0.0_633, -0.1_131]], # fmt: on ] ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_sd_image(_UpperCAmelCase ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : str = model(_UpperCAmelCase ).sample assert sample.shape == image.shape SCREAMING_SNAKE_CASE_ : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu() SCREAMING_SNAKE_CASE_ : Dict = torch.tensor(expected_slice_mps if torch_device == 'mps' 
else expected_slice ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=3E-3 ) @parameterized.expand( [ # fmt: off [1_3, [-0.2_051, -0.1_803, -0.2_311, -0.2_114, -0.3_292, -0.3_574, -0.2_953, -0.3_323]], [3_7, [-0.2_632, -0.2_625, -0.2_199, -0.2_741, -0.4_539, -0.4_990, -0.3_720, -0.4_925]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ : str = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Dict = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] SCREAMING_SNAKE_CASE_ : List[str] = sample[-1, -2:, :2, -2:].flatten().cpu() SCREAMING_SNAKE_CASE_ : Any = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1E-3 ) @parameterized.expand( [ # fmt: off [2_7, [-0.0_369, 0.0_207, -0.0_776, -0.0_682, -0.1_747, -0.1_930, -0.1_465, -0.2_039]], [1_6, [-0.1_628, -0.2_134, -0.2_747, -0.2_642, -0.3_774, -0.4_404, -0.3_687, -0.4_277]], # fmt: on ] ) @require_torch_gpu def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = self.get_sd_vae_model(fpaa=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Any = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 6_4, 6_4) , fpaa=_UpperCAmelCase ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] SCREAMING_SNAKE_CASE_ : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu() SCREAMING_SNAKE_CASE_ : str = torch.tensor(_UpperCAmelCase ) assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=5E-3 ) @parameterized.expand([(1_3,), (1_6,), (2_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when 
using PyTorch 2.0.' ) def UpperCamelCase__ ( self , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.get_sd_vae_model(fpaa=_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 6_4, 6_4) , fpaa=_UpperCAmelCase ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : int = model.decode(_UpperCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Tuple = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1E-1 ) @parameterized.expand([(1_3,), (1_6,), (3_7,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' ) def UpperCamelCase__ ( self , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_sd_image(_UpperCAmelCase , shape=(3, 4, 6_4, 6_4) ) with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Optional[Any] = model.decode(_UpperCAmelCase ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): SCREAMING_SNAKE_CASE_ : Union[str, Any] = model.decode(_UpperCAmelCase ).sample assert list(sample.shape ) == [3, 3, 5_1_2, 5_1_2] assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=1E-2 ) @parameterized.expand( [ # fmt: off [3_3, [-0.3_001, 0.0_918, -2.6_984, -3.9_720, -3.2_099, -5.0_353, 1.7_338, -0.2_065, 3.4_267]], [4_7, [-1.5_030, -4.3_871, -6.0_355, -9.1_157, -1.6_661, -2.7_853, 2.1_607, -5.0_823, 2.5_633]], # fmt: on ] ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_sd_vae_model() SCREAMING_SNAKE_CASE_ : List[str] = self.get_sd_image(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : str = self.get_generator(_UpperCAmelCase ) with torch.no_grad(): 
SCREAMING_SNAKE_CASE_ : int = model.encode(_UpperCAmelCase ).latent_dist SCREAMING_SNAKE_CASE_ : int = dist.sample(generator=_UpperCAmelCase ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] SCREAMING_SNAKE_CASE_ : str = sample[0, -1, -3:, -3:].flatten().cpu() SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(_UpperCAmelCase ) SCREAMING_SNAKE_CASE_ : str = 3E-3 if torch_device != 'mps' else 1E-2 assert torch_all_close(_UpperCAmelCase , _UpperCAmelCase , atol=_UpperCAmelCase )
101
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length pattern strings differing in at most one position.

    Returns the merged pattern (the differing position replaced by '_'),
    or ``False`` when the strings differ in more than one position.

    >>> compare_string("0010", "0110")
    '0_10'
    >>> compare_string("0110", "1101")
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        # Bug fix: the original compared a list with itself, so no
        # difference was ever detected and nothing could ever merge.
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge combinable terms and return the prime implicants.

    >>> sorted(check(["001", "101", "111"]))
    ['1_1', '_01']
    """
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                merged = compare_string(binary[i], binary[j])
                # Bug fix: mark the *combinable* pair as used and keep the
                # merged pattern for the next round; the original marked
                # incombinable pairs and appended a literal 'X', which
                # destroyed the algorithm's output.
                if merged is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(merged)
        for i in range(len(binary)):
            if check1[i] == "$":
                # A term that merged with nothing is a prime implicant.
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))  # deduplicate before the next merge round


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Convert each minterm to a ``no_of_variable``-bit binary string.

    >>> decimal_to_binary(3, [1, 5, 7])
    ['001', '101', '111']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            # Bug fix: minterms must be ints; with floats (the original
            # annotation and input parsing) str(minterm % 2) yields
            # '1.0'/'0.0' and the "binary" strings are garbage.
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when implicant pattern *string1* covers minterm *string2*.

    ``count`` is the number of '_' wildcards in *string1*; the pattern covers
    the minterm exactly when the two strings differ in precisely that many
    positions (i.e. only at the wildcards).
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        # Bug fix: compare the two strings (the original compared a list
        # with itself, making every implicant appear to cover everything).
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick a covering set of implicants from the coverage *chart*.

    First takes every essential prime implicant (the sole cover of some
    column), then greedily takes the implicant covering the most remaining
    minterms.  Note: *chart* is zeroed in place as columns become covered.
    """
    temp = []
    select = [0] * len(prime_implicants)
    # Essential prime implicants: columns covered by exactly one row.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    # Zero out every column this essential implicant covers.
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover for whatever is still uncovered.
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j.

    >>> prime_implicant_chart(['_01', '1_1'], ['001', '101', '111'])
    [[1, 1, 0], [0, 1, 1]]
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """Interactively run Quine-McCluskey minimisation on user-entered minterms."""
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        # Bug fix: parse minterms as ints, not floats (see decimal_to_binary).
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
687
0
"""Tests for the Bark speech-model processor."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class _lowercase(unittest.TestCase):
    """Processor round-trip and voice-preset tests.

    Fixes over the obfuscated original: every method was named ``a`` (so only
    the last definition survived on the class) and the fixture values were
    bound to locals even though later methods read them from ``self``.
    """

    def setUp(self):
        # Fixture values referenced via self.* throughout the test methods.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(),
                processed_voice_preset.get(key, np.array([])).tolist(),
            )
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(),
                processed_voice_preset.get(key, np.array([])).tolist(),
            )
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # NOTE(review): these tokenizer flags were unrecoverable placeholders
        # in the obfuscated source; values below are the conventional ones for
        # this processor test — confirm against the upstream test suite.
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
56
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py a = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ a = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. 
Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ a = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ): _lowerCAmelCase :Any = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
687
0
import unittest from .lib import ( Matrix, Vector, axpy, square_zero_matrix, unit_basis_vector, zero_vector, ) class A_ ( unittest.TestCase ): '''simple docstring''' def snake_case__ ( self) -> int: """simple docstring""" _UpperCAmelCase : Any = Vector([1, 2, 3]) self.assertEqual(x.component(0) , 1) self.assertEqual(x.component(2) , 3) _UpperCAmelCase : Optional[Any] = Vector() def snake_case__ ( self) -> Tuple: """simple docstring""" _UpperCAmelCase : int = Vector([0, 0, 0, 0, 0, 1]) self.assertEqual(str(_UpperCAmelCase) , '''(0,0,0,0,0,1)''') def snake_case__ ( self) -> Any: """simple docstring""" _UpperCAmelCase : Tuple = Vector([1, 2, 3, 4]) self.assertEqual(len(_UpperCAmelCase) , 4) def snake_case__ ( self) -> Any: """simple docstring""" _UpperCAmelCase : str = Vector([1, 2]) _UpperCAmelCase : Optional[Any] = Vector([1, 2, 3, 4, 5]) _UpperCAmelCase : int = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) _UpperCAmelCase : Union[str, Any] = Vector([1, -1, 1, -1, 2, -3, 4, -5]) self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3) self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3) self.assertEqual(z.euclidean_length() , 0) self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3) def snake_case__ ( self) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Any = Vector([1, 2, 3]) _UpperCAmelCase : Optional[int] = Vector([1, 1, 1]) self.assertEqual((x + y).component(0) , 2) self.assertEqual((x + y).component(1) , 3) self.assertEqual((x + y).component(2) , 4) def snake_case__ ( self) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = Vector([1, 2, 3]) _UpperCAmelCase : List[str] = Vector([1, 1, 1]) self.assertEqual((x - y).component(0) , 0) self.assertEqual((x - y).component(1) , 1) self.assertEqual((x - y).component(2) , 2) def snake_case__ ( self) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Tuple = Vector([1, 2, 3]) _UpperCAmelCase : Tuple = Vector([2, -1, 4]) # for test of dot product _UpperCAmelCase : Dict = Vector([1, -2, -1]) 
self.assertEqual(str(x * 3.0) , '''(3.0,6.0,9.0)''') self.assertEqual((a * b) , 0) def snake_case__ ( self) -> List[str]: """simple docstring""" self.assertEqual(str(zero_vector(10)).count('''0''') , 10) def snake_case__ ( self) -> Optional[int]: """simple docstring""" self.assertEqual(str(unit_basis_vector(3 , 1)) , '''(0,1,0)''') def snake_case__ ( self) -> Optional[int]: """simple docstring""" _UpperCAmelCase : Tuple = Vector([1, 2, 3]) _UpperCAmelCase : int = Vector([1, 0, 1]) self.assertEqual(str(axpy(2 , _UpperCAmelCase , _UpperCAmelCase)) , '''(3,4,7)''') def snake_case__ ( self) -> List[Any]: """simple docstring""" _UpperCAmelCase : str = Vector([1, 0, 0, 0, 0, 0]) _UpperCAmelCase : Any = x.copy() self.assertEqual(str(_UpperCAmelCase) , str(_UpperCAmelCase)) def snake_case__ ( self) -> List[str]: """simple docstring""" _UpperCAmelCase : Optional[int] = Vector([1, 0, 0]) x.change_component(0 , 0) x.change_component(1 , 1) self.assertEqual(str(_UpperCAmelCase) , '''(0,1,0)''') def snake_case__ ( self) -> int: """simple docstring""" _UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(_UpperCAmelCase)) def snake_case__ ( self) -> str: """simple docstring""" _UpperCAmelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) _UpperCAmelCase : Tuple = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(minors[x][y] , a.minor(_UpperCAmelCase , _UpperCAmelCase)) def snake_case__ ( self) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) _UpperCAmelCase : Any = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]] for x in range(a.height()): for y in range(a.width()): self.assertEqual(cofactors[x][y] , a.cofactor(_UpperCAmelCase , _UpperCAmelCase)) def snake_case__ ( self) -> Any: """simple docstring""" _UpperCAmelCase : List[str] = Matrix([[1, 2, 
3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(-5 , a.determinant()) def snake_case__ ( self) -> Tuple: """simple docstring""" _UpperCAmelCase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3) _UpperCAmelCase : Optional[int] = Vector([1, 2, 3]) self.assertEqual('''(14,32,50)''' , str(a * x)) self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2)) def snake_case__ ( self) -> int: """simple docstring""" _UpperCAmelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) a.change_component(0 , 2 , 5) self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(_UpperCAmelCase)) def snake_case__ ( self) -> Any: """simple docstring""" _UpperCAmelCase : List[str] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) self.assertEqual(7 , a.component(2 , 1) , 0.01) def snake_case__ ( self) -> Dict: """simple docstring""" _UpperCAmelCase : Tuple = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) _UpperCAmelCase : List[Any] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3) self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b)) def snake_case__ ( self) -> List[Any]: """simple docstring""" _UpperCAmelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3) _UpperCAmelCase : List[str] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3) self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b)) def snake_case__ ( self) -> Optional[int]: """simple docstring""" self.assertEqual( '''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5)) , ) if __name__ == "__main__": unittest.main()
485
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazy-import structure: maps submodule name -> public names it provides.
# Bug fix: the original bound the dict, the modeling-name list and the lazy
# module object all to the same name ``a`` (clobbering each other) and then
# passed an undefined ``_import_structure`` to _LazyModule.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is absent: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )
else:
    import sys

    # Install a lazy proxy module so heavy submodules load only on attribute
    # access.  Bug fix: the original built the _LazyModule but never put it
    # into sys.modules, leaving the import machinery untouched.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
687
0
"""simple docstring""" import qiskit def UpperCAmelCase ( A : int , A : int ): '''simple docstring''' _UpperCAmelCase = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register _UpperCAmelCase = qiskit.QuantumCircuit(A , A ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator _UpperCAmelCase = qiskit.execute(A , A , shots=1000 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(A ) if __name__ == "__main__": lowercase = single_qubit_measure(2, 2) print(F'''Total count for various states are: {counts}''')
573
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ): _lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20} _lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18} _lowerCAmelCase :str = parent _lowerCAmelCase :List[Any] = batch_size _lowerCAmelCase :Optional[Any] = num_channels _lowerCAmelCase :Optional[Any] = image_size _lowerCAmelCase :int = min_resolution _lowerCAmelCase :List[str] = max_resolution _lowerCAmelCase :List[str] = do_resize _lowerCAmelCase :Optional[int] = size _lowerCAmelCase :str = do_center_crop _lowerCAmelCase :int = crop_size _lowerCAmelCase :Optional[int] = do_flip_channel_order def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = 
MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self: str ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) ) def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) _lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): pass def SCREAMING_SNAKE_CASE__ ( self: int ): # Initialize image_processing _lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input _lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): # Initialize image_processing _lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self: Any ): # Initialize image_processing _lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
687
0
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class UpperCAmelCase__(unittest.TestCase, ToolTesterMixin):
    """Tests for the ``text-classification`` tool, locally and via remote inference.

    Fixes over the obfuscated original: the mixin base was an undefined name
    (``snake_case__`` — the file imports ``ToolTesterMixin``), every method
    shared one name (so only the last survived), and the tools were bound to
    locals instead of ``self`` although the tests read ``self.tool`` and
    ``self.remote_tool``.
    """

    def setup(self):
        # Local tool plus its remote (inference-endpoint) counterpart.
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
221
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas loader.

    Named ``PandasConfig`` because the builder below references that name; the
    previous version left it undefined (NameError) and gave both classes the
    same placeholder name.
    """

    # Optional explicit schema; when None the schema is inferred by Arrow.
    features: Optional[datasets.Features] = None


class UpperCAmelCase_(datasets.ArrowBasedBuilder):
    """Arrow-based builder that yields tables from pickled pandas DataFrames."""

    # Framework contract attribute read by `datasets`; previously bound to a
    # placeholder name, which made the builder unusable.
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        # Expose the (possibly None) user-provided features as dataset info.
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Handle str, list and dict forms of ``data_files``."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        # One Arrow table per input file; each file is a pickled DataFrame.
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
687
0
"""UMT5 model configuration."""
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging


# The previous version bound the logger and the archive map to the same name,
# clobbering the logger.
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for UMT5 (T5-style encoder-decoder) models.

    Fixes vs. the previous version: the base class was an undefined name
    (``PretrainedConfig`` was imported but unused); every ``__init__``
    parameter shared one name (a SyntaxError); ``model_type`` and
    ``keys_to_ignore_at_inference`` were collapsed onto a single attribute.
    """

    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        # `feed_forward_proj` is either "<act>" or "gated-<act>".
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export config; renamed because the previous version reused the
    config class's name (the second definition shadowed the first)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
94
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2  # the previous version imported the nonexistent module `cva`

# Configuration constants. The previous version assigned all four to a single
# reused name and then referenced undefined names (`OUTPUT_DIR` etc.) at runtime.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every dataset image and write the image plus updated YOLO annotations."""
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and YOLO-format boxes.

    The previous version declared both parameters with the same name, which is
    a SyntaxError in Python.

    :param label_dir: directory containing one ``.txt`` label file per image
    :param img_dir: directory containing the matching ``.jpg`` images
    :return: (image paths, per-image list of [class, cx, cy, w, h] boxes)
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its normalized box centers accordingly.

    :param img_list: list of image paths
    :param anno_list: per-image list of [class, cx, cy, w, h] boxes
    :param flip_type: 0 = vertical flip (mirror cy), 1 = horizontal flip (mirror cx)
    :return: (flipped images, updated annotations, original paths)
    """
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length ``number_char``."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
687
0
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    # The previous version imported the nonexistent name `ConvaD`;
    # `Conv1D` is what `issubclass(module.source_cls, ...)` below needs.
    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """Set a (possibly quantized) parameter or buffer of ``module`` on ``device``.

    Handles dotted ``tensor_name`` paths, bnb 4-bit/8-bit parameters, and plain
    tensors/buffers. Fixes vs. the previous version: every top-level function
    here shared one name (each definition shadowed the last, and the call
    sites referenced never-defined names), parameters were duplicated (a
    SyntaxError), and the 4-bit/8-bit flags were collapsed into one variable.

    :param value: replacement tensor; required when the old value is on "meta"
    :param fp16_statistics: pre-quantized SCB statistics to attach to the weight
    """
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        # `Params4bit` only exists in recent bitsandbytes versions, hence the hasattr guard.
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_4bit or is_8bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively swap eligible ``nn.Linear`` / ``Conv1D`` children for bnb layers.

    Returns ``(model, has_been_replaced)``; the flag lets the public wrapper
    warn when nothing was converted.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public entry point: convert linear layers to bnb quantized layers.

    ``lm_head`` is kept in full precision by default for numerical stability.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias for :func:`replace_with_bnb_linear`."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias for :func:`set_module_quantized_tensor_to_device`."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    """Return module names that should stay in full precision (tied weights + head)."""
    # Create a copy of the model and tie the weights, then check for tied weights.
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
429
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


a = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity matrix between two embedding batches.

    The previous version declared both parameters with the same name, which is
    a SyntaxError in Python.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class UpperCAmelCase_(PreTrainedModel):
    """CLIP-based NSFW safety checker.

    Fixes vs. the previous version: the base class was an undefined name
    (``PreTrainedModel`` was imported but unused); the ``config_class`` /
    ``_no_split_modules`` contract attributes and the two forward methods were
    each collapsed onto a single repeated name, so only the last definition of
    each survived.
    """

    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        # Fixed-size concept banks; real values are loaded from the checkpoint.
        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
687
0
"""Tests for the Shap-E image-to-image pipeline."""
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast (dummy-component) tests.

    Fixes vs. the previous version: the base class was an undefined name
    (``PipelineTesterMixin`` was imported but unused); both test classes shared
    one name; all class attributes and all methods/properties were collapsed
    onto single repeated names, so references like ``self.dummy_prior`` or
    ``self.time_input_dim`` resolved to nothing.
    """

    pipeline_class = ShapEImgaImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        return CLIPVisionModel(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        return PriorTransformer(**model_kwargs)

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        return ShapERenderer(**model_kwargs)

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the released openai/shap-e-img2img weights."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
575
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a = 6_3_7_8_1_3_7.0 a = 6_3_5_6_7_5_2.3_1_4_2_4_5 a = 6_378_137 def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ): """simple docstring""" _lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) _lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS # Intermediate P and Q values _lowerCAmelCase :str = (b_lata + b_lata) / 2 _lowerCAmelCase :Tuple = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2) _lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2 _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2) _lowerCAmelCase :str = sin(sigma / 2 ) ** 2 _lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
687
0
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionTextToImagePipeline from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device lowerCAmelCase_ = False class _snake_case ( unittest.TestCase ): """simple docstring""" pass @nightly @require_torch_gpu class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Dict): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = VersatileDiffusionTextToImagePipeline.from_pretrained("""shi-labs/versatile-diffusion""") # remove text_unet pipe.remove_unused_weights() pipe.to(_UpperCAmelCase) pipe.set_progress_bar_config(disable=_UpperCAmelCase) _SCREAMING_SNAKE_CASE : Dict = 'A painting of a squirrel eating a burger ' _SCREAMING_SNAKE_CASE : str = torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Optional[Any] = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""").images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_UpperCAmelCase) _SCREAMING_SNAKE_CASE : List[str] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase) pipe.to(_UpperCAmelCase) pipe.set_progress_bar_config(disable=_UpperCAmelCase) _SCREAMING_SNAKE_CASE : List[str] = generator.manual_seed(0) _SCREAMING_SNAKE_CASE : int = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""").images assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained( """shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa) pipe.to(_UpperCAmelCase) 
pipe.set_progress_bar_config(disable=_UpperCAmelCase) _SCREAMING_SNAKE_CASE : Dict = 'A painting of a squirrel eating a burger ' _SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Tuple = pipe( prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=5_0 , output_type="""numpy""").images _SCREAMING_SNAKE_CASE : str = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _SCREAMING_SNAKE_CASE : List[str] = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
338
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : Dict = 'encoder-decoder' lowerCamelCase : Optional[Any] = True def __init__( self: str , **_UpperCAmelCase: int ): super().__init__(**_UpperCAmelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' ) _lowerCAmelCase :Dict = encoder_config.pop('model_type' ) _lowerCAmelCase :str = kwargs.pop('decoder' ) _lowerCAmelCase :str = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Any = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ): logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) _lowerCAmelCase :Dict = True _lowerCAmelCase :List[str] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase :Optional[int] = self.encoder.to_dict() _lowerCAmelCase :Union[str, Any] = self.decoder.to_dict() _lowerCAmelCase :List[str] = self.__class__.model_type return output
687
0
import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} __UpperCAmelCase = { 'tokenizer_file': { 'EleutherAI/gpt-neox-20b': 'https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json', }, } __UpperCAmelCase = { 'gpt-neox-20b': 20_48, } class __a ( snake_case__ ): __snake_case : int = VOCAB_FILES_NAMES __snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP __snake_case : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case : List[Any] = ['input_ids', 'attention_mask'] def __init__( self : str , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Dict="<|endoftext|>" , UpperCAmelCase : Any="<|endoftext|>" , UpperCAmelCase : str="<|endoftext|>" , UpperCAmelCase : Tuple=False , **UpperCAmelCase : Tuple , ): super().__init__( _UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , unk_token=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) lowerCAmelCase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _UpperCAmelCase ) != add_prefix_space: lowerCAmelCase_ : Optional[int] = getattr(_UpperCAmelCase , pre_tok_state.pop("""type""" ) ) lowerCAmelCase_ : Optional[Any] = add_prefix_space lowerCAmelCase_ : List[Any] = pre_tok_class(**_UpperCAmelCase ) lowerCAmelCase_ : List[str] = add_prefix_space def A ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ): lowerCAmelCase_ : Union[str, Any] = 
self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase ) def A ( self : Any , UpperCAmelCase : "Conversation" ): lowerCAmelCase_ : List[str] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ) + [self.eos_token_id] ) if len(_UpperCAmelCase ) > self.model_max_length: lowerCAmelCase_ : Dict = input_ids[-self.model_max_length :] return input_ids
600
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ): _lowerCAmelCase :Optional[int] = parent _lowerCAmelCase :Dict = batch_size _lowerCAmelCase 
:Optional[Any] = image_size _lowerCAmelCase :Optional[Any] = patch_size _lowerCAmelCase :List[Any] = num_channels _lowerCAmelCase :Optional[int] = embed_dim _lowerCAmelCase :List[str] = hidden_sizes _lowerCAmelCase :Union[str, Any] = depths _lowerCAmelCase :int = num_heads _lowerCAmelCase :Any = window_size _lowerCAmelCase :List[Any] = mlp_ratio _lowerCAmelCase :Optional[int] = qkv_bias _lowerCAmelCase :Union[str, Any] = hidden_dropout_prob _lowerCAmelCase :Optional[int] = attention_probs_dropout_prob _lowerCAmelCase :Dict = drop_path_rate _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :Tuple = use_absolute_embeddings _lowerCAmelCase :Optional[int] = patch_norm _lowerCAmelCase :Optional[Any] = layer_norm_eps _lowerCAmelCase :Union[str, Any] = initializer_range _lowerCAmelCase :List[str] = is_training _lowerCAmelCase :str = scope _lowerCAmelCase :Optional[int] = use_labels _lowerCAmelCase :List[Any] = type_sequence_label_size _lowerCAmelCase :Union[str, Any] = encoder_stride _lowerCAmelCase :Optional[int] = out_features _lowerCAmelCase :List[str] = out_indices def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase :Dict = None if self.use_labels: _lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase :str = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self: int ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ): _lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowerCAmelCase :Optional[int] = None _lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Any = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify 
channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase :List[Any] = 1 _lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :int = model(_UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size _lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase :Optional[int] = 1 _lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = 
self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs _lowerCAmelCase :List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCamelCase : Optional[Any] = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = False lowerCamelCase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = FocalNetModelTester(self ) _lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): return def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def SCREAMING_SNAKE_CASE__ ( self: str ): pass def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase :Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Tuple = model_class(_UpperCAmelCase ) _lowerCAmelCase :Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase :int = [*signature.parameters.keys()] _lowerCAmelCase :List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _lowerCAmelCase :List[Any] = 
outputs.hidden_states _lowerCAmelCase :str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # FocalNet has a different seq_length _lowerCAmelCase :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _lowerCAmelCase :List[str] = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape _lowerCAmelCase :Optional[int] = ( reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Dict = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common() 
_lowerCAmelCase :str = 3 _lowerCAmelCase :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase :int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :List[str] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Union[str, Any] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) @slow def SCREAMING_SNAKE_CASE__ ( self: int ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase :str = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE__ ( self: Dict ): # TODO update organization 
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.default_image_processor _lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase :Dict = model(**_UpperCAmelCase ) # verify the logits _lowerCAmelCase :str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else () lowerCamelCase : str = FocalNetConfig lowerCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Any = FocalNetModelTester(self )
687
0
'''simple docstring''' from collections.abc import Generator from math import sin def lowerCamelCase__ ( __lowerCamelCase : bytes ): '''simple docstring''' if len(__lowerCamelCase ) != 3_2: raise ValueError('Input must be of length 32' ) _UpperCAmelCase : Tuple =b'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def lowerCamelCase__ ( __lowerCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('Input must be non-negative' ) _UpperCAmelCase : Dict =format(__lowerCamelCase , '08x' )[-8:] _UpperCAmelCase : Any =b'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def lowerCamelCase__ ( __lowerCamelCase : bytes ): '''simple docstring''' _UpperCAmelCase : str =b'' for char in message: bit_string += format(__lowerCamelCase , '08b' ).encode('utf-8' ) _UpperCAmelCase : Union[str, Any] =format(len(__lowerCamelCase ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(__lowerCamelCase ) % 5_1_2 != 4_4_8: bit_string += b"0" bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] ) return bit_string def lowerCamelCase__ ( __lowerCamelCase : bytes ): '''simple docstring''' if len(__lowerCamelCase ) % 5_1_2 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(__lowerCamelCase ) , 5_1_2 ): _UpperCAmelCase : Optional[int] =bit_string[pos : pos + 5_1_2] _UpperCAmelCase : Tuple =[] for i in range(0 , 5_1_2 , 3_2 ): block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) ) yield block_words def lowerCamelCase__ ( __lowerCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('Input must be non-negative' ) _UpperCAmelCase : Union[str, Any] =format(__lowerCamelCase , '032b' ) _UpperCAmelCase : Dict ='' for c in i_str: new_str += "1" if c == "0" else "0" return int(__lowerCamelCase , 2 ) def lowerCamelCase__ ( 
__lowerCamelCase : int , __lowerCamelCase : int ): '''simple docstring''' return (a + b) % 2**3_2 def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : int ): '''simple docstring''' if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2 def lowerCamelCase__ ( __lowerCamelCase : bytes ): '''simple docstring''' _UpperCAmelCase : Optional[Any] =preprocess(__lowerCamelCase ) _UpperCAmelCase : int =[int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )] # Starting states _UpperCAmelCase : Tuple =0X67_45_23_01 _UpperCAmelCase : List[Any] =0XEF_CD_AB_89 _UpperCAmelCase : List[str] =0X98_BA_DC_FE _UpperCAmelCase : List[str] =0X10_32_54_76 _UpperCAmelCase : List[Any] =[ 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(__lowerCamelCase ): _UpperCAmelCase : Optional[int] =aa _UpperCAmelCase : Optional[int] =ba _UpperCAmelCase : Tuple =ca _UpperCAmelCase : Union[str, Any] =da # Hash current chunk for i in range(6_4 ): if i <= 1_5: # f = (b & c) | (not_32(b) & d) # Alternate definition for f _UpperCAmelCase : Any =d ^ (b & (c ^ d)) _UpperCAmelCase : int =i elif i <= 3_1: # f = (d & b) | (not_32(d) & c) # Alternate definition for f _UpperCAmelCase : Optional[Any] =c ^ (d & (b ^ c)) _UpperCAmelCase : Dict =(5 * i + 1) % 1_6 elif i <= 4_7: _UpperCAmelCase : Optional[int] =b ^ c ^ d _UpperCAmelCase : List[str] =(3 * i + 5) % 1_6 else: _UpperCAmelCase : Any =c ^ (b | not_aa(__lowerCamelCase )) _UpperCAmelCase : Any =(7 * i) % 1_6 _UpperCAmelCase : Dict =(f + a + added_consts[i] + block_words[g]) % 2**3_2 _UpperCAmelCase : List[Any] 
=d _UpperCAmelCase : Tuple =c _UpperCAmelCase : Any =b _UpperCAmelCase : Union[str, Any] =sum_aa(__lowerCamelCase , left_rotate_aa(__lowerCamelCase , shift_amounts[i] ) ) # Add hashed chunk to running total _UpperCAmelCase : Optional[Any] =sum_aa(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase : List[Any] =sum_aa(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase : Optional[int] =sum_aa(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase : Tuple =sum_aa(__lowerCamelCase , __lowerCamelCase ) _UpperCAmelCase : Tuple =reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) + reformat_hex(__lowerCamelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
446
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a = HfApi() a = {} # fmt: off a = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) a = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) a = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) a = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) a = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) a = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 
0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) a = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) a = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) a = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) a = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) a = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, 
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) a = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) a = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) a = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) a = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on a = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): a = UNetaDModel.from_pretrained(local_checkpoint, 
subfolder="""unet""") else: a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
687
0
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel lowerCAmelCase__ : Union[str, Any] =HfApi() lowerCAmelCase__ : str ={} # fmt: off lowerCAmelCase__ : Dict =torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) lowerCAmelCase__ : Tuple =torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) lowerCAmelCase__ : List[Any] =torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) lowerCAmelCase__ : List[str] =torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) lowerCAmelCase__ : Tuple =torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, 
-0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) lowerCAmelCase__ : Dict =torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) lowerCAmelCase__ : Tuple =torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) lowerCAmelCase__ : Dict =torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) lowerCAmelCase__ : str =torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) lowerCAmelCase__ : List[str] =torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) lowerCAmelCase__ : List[str] =torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, -2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) lowerCAmelCase__ : Dict =torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) lowerCAmelCase__ : Dict =torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) lowerCAmelCase__ : Dict =torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) lowerCAmelCase__ : int =torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 
2.1_2_1_9 ]) # fmt: on lowerCAmelCase__ : int =api.list_models(filter='diffusers') for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": lowerCAmelCase__ : List[str] ='/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1] print(F"""Started running {mod.modelId}!!!""") if mod.modelId.startswith('CompVis'): lowerCAmelCase__ : Optional[Any] =UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet') else: lowerCAmelCase__ : int =UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) lowerCAmelCase__ : int =torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) lowerCAmelCase__ : Optional[Any] =torch.tensor([10] * noise.shape[0]) with torch.no_grad(): lowerCAmelCase__ : Any =model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3 ) print(F"""{mod.modelId} has passed successfully!!!""")
101
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Optional[int] = 10 def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :str = [1, 2, 3, 4] _lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] _lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.' 
_lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , [] ) def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Optional[int] = '' _lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase ) self.assertEqual(_UpperCAmelCase , [] ) self.assertEqual(_UpperCAmelCase , [] ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Optional[Any] = ( 'It was the year of Our Lord one thousand seven hundred and ' 'seventy-five\n\nSpiritual revelations were conceded to England ' 'at that favoured period, as at this.\n@highlight\n\nIt was the best of times' ) _lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = [ 'It was the year of Our Lord one thousand seven hundred and seventy-five.', 'Spiritual revelations were conceded to England at that favoured period, as at this.', ] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Optional[int] = ['It was the best of times.'] self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] ) _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) _lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :List[str] = 101 _lowerCAmelCase 
:Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) _lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) _lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase ) np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
687
0
'''simple docstring''' import contextlib import csv import json import os import sqlitea import tarfile import textwrap import zipfile import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config @pytest.fixture(scope='session' ) def _a () -> Optional[int]: """simple docstring""" __snake_case = 1_0 __snake_case = datasets.Features( { 'tokens': datasets.Sequence(datasets.Value('string' ) ), 'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ), 'answers': datasets.Sequence( { 'text': datasets.Value('string' ), 'answer_start': datasets.Value('int32' ), } ), 'id': datasets.Value('int64' ), } ) __snake_case = datasets.Dataset.from_dict( { 'tokens': [['foo'] * 5] * n, 'labels': [[1] * 5] * n, 'answers': [{'answer_start': [9_7], 'text': ['1976']}] * 1_0, 'id': list(range(lowercase__ ) ), } , features=lowercase__ , ) return dataset @pytest.fixture(scope='session' ) def _a (lowercase__ : Optional[int] , lowercase__ : Dict ) -> List[Any]: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' ) dataset.map(cache_file_name=lowercase__ ) return filename # FILE_CONTENT + files _a : Union[str, Any] = "\\n Text data.\n Second line of data." 
@pytest.fixture(scope='session' ) def _a (lowercase__ : Optional[int] ) -> Optional[Any]: """simple docstring""" __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt' __snake_case = FILE_CONTENT with open(lowercase__ , 'w' ) as f: f.write(lowercase__ ) return filename @pytest.fixture(scope='session' ) def _a (lowercase__ : Any ) -> int: """simple docstring""" import bza __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2' __snake_case = bytes(lowercase__ , 'utf-8' ) with bza.open(lowercase__ , 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Dict ) -> List[str]: """simple docstring""" import gzip __snake_case = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' ) __snake_case = bytes(lowercase__ , 'utf-8' ) with gzip.open(lowercase__ , 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : List[str] ) -> Tuple: """simple docstring""" if datasets.config.LZ4_AVAILABLE: import lza.frame __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4' __snake_case = bytes(lowercase__ , 'utf-8' ) with lza.frame.open(lowercase__ , 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : List[Any] , lowercase__ : List[Any] ) -> Any: """simple docstring""" if datasets.config.PY7ZR_AVAILABLE: import pyazr __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.7z' with pyazr.SevenZipFile(lowercase__ , 'w' ) as archive: archive.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Tuple , lowercase__ : Tuple ) -> List[str]: """simple docstring""" import tarfile __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.tar' with tarfile.TarFile(lowercase__ , 'w' ) as f: f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Tuple ) -> Optional[Any]: 
"""simple docstring""" import lzma __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.xz' __snake_case = bytes(lowercase__ , 'utf-8' ) with lzma.open(lowercase__ , 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Any , lowercase__ : Union[str, Any] ) -> Tuple: """simple docstring""" import zipfile __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zip' with zipfile.ZipFile(lowercase__ , 'w' ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : int ) -> str: """simple docstring""" if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd __snake_case = tmp_path_factory.mktemp('data' ) / 'file.txt.zst' __snake_case = bytes(lowercase__ , 'utf-8' ) with zstd.open(lowercase__ , 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Tuple ) -> Union[str, Any]: """simple docstring""" __snake_case = tmp_path_factory.mktemp('data' ) / 'file.xml' __snake_case = textwrap.dedent( '\\n <?xml version="1.0" encoding="UTF-8" ?>\n <tmx version="1.4">\n <header segtype="sentence" srclang="ca" />\n <body>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang="en"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang="en"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang="en"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang="en"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang="en"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>' ) with open(lowercase__ , 'w' ) as f: f.write(lowercase__ ) return filename _a : Dict = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, 
"col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] _a : Union[str, Any] = [ {"col_1": "4", "col_2": 4, "col_3": 4.0}, {"col_1": "5", "col_2": 5, "col_3": 5.0}, ] _a : Optional[int] = { "col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0], } _a : List[Any] = [ {"col_3": 0.0, "col_1": "0", "col_2": 0}, {"col_3": 1.0, "col_1": "1", "col_2": 1}, ] _a : List[str] = [ {"col_1": "s0", "col_2": 0, "col_3": 0.0}, {"col_1": "s1", "col_2": 1, "col_3": 1.0}, {"col_1": "s2", "col_2": 2, "col_3": 2.0}, {"col_1": "s3", "col_2": 3, "col_3": 3.0}, ] @pytest.fixture(scope='session' ) def _a () -> Dict: """simple docstring""" return DATA_DICT_OF_LISTS @pytest.fixture(scope='session' ) def _a (lowercase__ : Optional[int] ) -> Optional[int]: """simple docstring""" __snake_case = datasets.Dataset.from_dict(lowercase__ ) __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' ) dataset.map(cache_file_name=lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Union[str, Any] ) -> Tuple: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' ) with contextlib.closing(sqlitea.connect(lowercase__ ) ) as con: __snake_case = con.cursor() cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' ) for item in DATA: cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' , tuple(item.values() ) ) con.commit() return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Dict ) -> Optional[int]: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' ) with open(lowercase__ , 'w' , newline='' ) as f: __snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : List[str] ) -> Optional[int]: """simple docstring""" __snake_case = 
str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' ) with open(lowercase__ , 'w' , newline='' ) as f: __snake_case = csv.DictWriter(lowercase__ , fieldnames=['col_1', 'col_2', 'col_3'] ) writer.writeheader() for item in DATA: writer.writerow(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : int , lowercase__ : Optional[int] ) -> str: """simple docstring""" import bza __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2' with open(lowercase__ , 'rb' ) as f: __snake_case = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bza.open(lowercase__ , 'wb' ) as f: f.write(lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : str , lowercase__ : Any , lowercase__ : Dict ) -> Tuple: """simple docstring""" __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(lowercase__ , 'w' ) as f: f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : int ) -> Optional[Any]: """simple docstring""" __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip' with zipfile.ZipFile(lowercase__ , 'w' ) as f: f.write(lowercase__ , arcname=os.path.basename(csv_path.replace('.csv' , '.CSV' ) ) ) f.write(lowercase__ , arcname=os.path.basename(csva_path.replace('.csv' , '.CSV' ) ) ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Any ) -> Any: """simple docstring""" __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip' with zipfile.ZipFile(lowercase__ , 'w' ) as f: f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) ) f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) ) return path @pytest.fixture(scope='session' ) def _a 
(lowercase__ : List[str] ) -> Optional[Any]: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' ) __snake_case = pa.schema( { 'col_1': pa.string(), 'col_2': pa.intaa(), 'col_3': pa.floataa(), } ) with open(lowercase__ , 'wb' ) as f: __snake_case = pq.ParquetWriter(lowercase__ , schema=lowercase__ ) __snake_case = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(lowercase__ ) )] for k in DATA[0]} , schema=lowercase__ ) writer.write_table(lowercase__ ) writer.close() return path @pytest.fixture(scope='session' ) def _a (lowercase__ : int ) -> Optional[int]: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) __snake_case = {'data': DATA} with open(lowercase__ , 'w' ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : Union[str, Any] ) -> Any: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' ) __snake_case = {'data': DATA_DICT_OF_LISTS} with open(lowercase__ , 'w' ) as f: json.dump(lowercase__ , lowercase__ ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : str ) -> int: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' ) with open(lowercase__ , 'w' ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + '\n' ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : List[Any] ) -> Dict: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' ) with open(lowercase__ , 'w' ) as f: for item in DATA: f.write(json.dumps(lowercase__ ) + '\n' ) return path @pytest.fixture(scope='session' ) def _a (lowercase__ : str ) -> Optional[int]: """simple docstring""" __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' ) with open(lowercase__ , 'w' ) as f: for item in DATA_312: f.write(json.dumps(lowercase__ ) + '\n' ) return path 
# ---------------------------------------------------------------------------
# Session-scoped pytest fixtures that materialise sample dataset files
# (jsonl / gz / zip / tar / txt / image) under temporary directories.
#
# NOTE(review): this block appears machine-mangled and is documented as-is:
#   * every fixture is defined under the same name `_a`, so each later
#     definition shadows the previous one and pytest can only resolve the
#     last fixture registered under that name;
#   * several signatures repeat the parameter name `lowercase__`, which is
#     a SyntaxError in Python;
#   * bodies reference names never bound in this view (`tmp_path_factory`,
#     `DATA_STR`, `data`, `path`, plus the modules `pytest`, `json`,
#     `zipfile`, `tarfile`, `os`), suggesting the original fixture and
#     parameter names were overwritten.
#   The original names cannot be recovered with certainty from this chunk,
#   so the code is left byte-identical and only annotated.
# ---------------------------------------------------------------------------


@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple ) -> List[str]:
    """Path to a JSON-lines file serialised (via json.dumps) from DATA_STR."""
    __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
    with open(lowercase__ , 'w' ) as f:
        for item in DATA_STR:
            f.write(json.dumps(lowercase__ ) + '\n' )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : List[Any] ) -> Optional[int]:
    """Path to a gzip-compressed copy of a text dataset file."""
    # NOTE(review): duplicate parameter name `lowercase__` — SyntaxError.
    import gzip

    __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
    with open(lowercase__ , 'rb' ) as orig_file:
        with gzip.open(lowercase__ , 'wb' ) as zipped_file:
            zipped_file.writelines(lowercase__ )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : List[str] , lowercase__ : Any ) -> int:
    """Path to a gzip-compressed copy of a jsonl dataset file."""
    import gzip

    __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
    with open(lowercase__ , 'rb' ) as orig_file:
        with gzip.open(lowercase__ , 'wb' ) as zipped_file:
            zipped_file.writelines(lowercase__ )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Dict , lowercase__ : Tuple ) -> Union[str, Any]:
    """Path to a zip archive containing two jsonl dataset files."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
        f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : int ) -> Union[str, Any]:
    """Path to a zip archive with a jsonl file stored under a `nested/` member path."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : Tuple ) -> List[Any]:
    """Path to a zip archive with jsonl files stored under a `main_dir/` member path."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
        f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : Any ) -> str:
    """Path to a tar archive containing two jsonl dataset files."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
    with tarfile.TarFile(lowercase__ , 'w' ) as f:
        f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
        f.add(lowercase__ , arcname=os.path.basename(lowercase__ ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : List[Any] , lowercase__ : Dict ) -> List[Any]:
    """Path to a tar archive with a jsonl file stored under a `nested/` member path."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
    with tarfile.TarFile(lowercase__ , 'w' ) as f:
        f.add(lowercase__ , arcname=os.path.join('nested' , os.path.basename(lowercase__ ) ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Union[str, Any]:
    """Path to a plain text file containing the lines '0'..'3'."""
    __snake_case = ['0', '1', '2', '3']
    __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
    with open(lowercase__ , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : int ) -> Optional[Any]:
    """Path to a second plain text file containing the lines '0'..'3'."""
    __snake_case = ['0', '1', '2', '3']
    __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
    with open(lowercase__ , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> List[str]:
    """Path to a data file with an unsupported `.abc` extension."""
    __snake_case = ['0', '1', '2', '3']
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
    with open(lowercase__ , 'w' ) as f:
        for item in data:
            f.write(item + '\n' )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : Dict ) -> Union[str, Any]:
    """Path to a zip archive containing two text dataset files."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
        f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : Dict , lowercase__ : Any , lowercase__ : str ) -> str:
    """Path to a zip archive with text files stored under a `main_dir/` member path."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
        f.write(lowercase__ , arcname=os.path.join('main_dir' , os.path.basename(lowercase__ ) ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : int , lowercase__ : int , lowercase__ : Union[str, Any] ) -> List[Any]:
    """Path to a zip archive whose members carry unsupported `.ext` extensions."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.basename('unsupported.ext' ) )
        f.write(lowercase__ , arcname=os.path.basename('unsupported_2.ext' ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] ) -> Optional[Any]:
    """Path to a UTF-8 text file containing a U+2029 (paragraph separator) character."""
    __snake_case = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
    __snake_case = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
    with open(lowercase__ , 'w' , encoding='utf-8' ) as f:
        f.write(lowercase__ )
    return path


@pytest.fixture(scope='session' )
def _a () -> Optional[int]:
    """Repo-relative path of a checked-in RGB test image."""
    return os.path.join('tests' , 'features' , 'data' , 'test_image_rgb.jpg' )


@pytest.fixture(scope='session' )
def _a () -> Optional[Any]:
    """Repo-relative path of a checked-in 44.1 kHz test WAV file."""
    return os.path.join('tests' , 'features' , 'data' , 'test_audio_44100.wav' )


@pytest.fixture(scope='session' )
def _a (lowercase__ : List[Any] , lowercase__ : Tuple ) -> Dict:
    """Path to a zip archive containing two copies of the test image."""
    __snake_case = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
    with zipfile.ZipFile(lowercase__ , 'w' ) as f:
        f.write(lowercase__ , arcname=os.path.basename(lowercase__ ) )
        f.write(lowercase__ , arcname=os.path.basename(lowercase__ ).replace('.jpg' , '2.jpg' ) )
    return path


@pytest.fixture(scope='session' )
def _a (lowercase__ : str ) -> Union[str, Any]:
    """Temp directory tree with visible and hidden subdirectories and files."""
    __snake_case = tmp_path_factory.mktemp('data_dir' )
    (data_dir / "subdir").mkdir()
    with open(data_dir / 'subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 1_0 )
    with open(data_dir / 'subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 1_0 )
    # hidden file
    with open(data_dir / 'subdir' / '.test.txt' , 'w' ) as f:
        f.write('bar\n' * 1_0 )
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / '.subdir' / 'train.txt' , 'w' ) as f:
        f.write('foo\n' * 1_0 )
    with open(data_dir / '.subdir' / 'test.txt' , 'w' ) as f:
        f.write('bar\n' * 1_0 )
    return data_dir
56
def perfect(number: int) -> bool:
    """Return True if *number* is a perfect number.

    A perfect number (e.g. 6 = 1 + 2 + 3, 28, 496) equals the sum of its
    proper positive divisors.

    Args:
        number: the integer to test.

    Returns:
        True when the proper divisors of ``number`` sum to ``number``.
    """
    # No proper divisor (other than the number itself) exceeds number // 2,
    # so it suffices to scan 1 .. number // 2.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


# BUG FIX: the function was named `UpperCamelCase_` while __main__ called the
# undefined name `perfect`.  Keep the old name as a backward-compatible alias.
UpperCamelCase_ = perfect

if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    # BUG FIX: the input was previously bound to `a` while `number` was used below.
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
687
0
import doctest
from collections import deque

import numpy as np


class A_:
    """Circular convolution of two discrete-time signals via a circulant matrix."""

    def __init__(self) -> None:
        # Example signals; their circular convolution is [10, 10, 6, 14].
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def snake_case__(self):
        """Return the circular convolution of the two signals, each sample rounded to 2 dp."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # Circulant matrix: row i is second_signal rotated right by i positions.
        matrix = [[0] * max_length for _ in range(max_length)]
        # Zero-pad the shorter signal so both have max_length samples.
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # matrix^T @ first_signal gives y[n] = sum_k x[k] * h[(n - k) mod N].
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # BUG FIX: previously round() was applied to the whole array instead of
        # each sample, which raises for non-scalar arrays.
        return [round(value, 2) for value in final_signal]


if __name__ == "__main__":
    doctest.testmod()
485
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    """A polynomial with real coefficients, stored lowest-degree first.

    ``coefficients[i]`` is the coefficient of ``x**i``.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Create a polynomial of the given degree.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                'The number of coefficients should be equal to the degree + 1.'
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        """Return the sum; the two operands may have different degrees."""
        # Copy the higher-degree coefficient list, then add the shorter one in.
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        coefficients = polynomial_a.coefficients[:]
        for i in range(self.degree + 1):
            coefficients[i] += self.coefficients[i]
        return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        # a - b == a + (-1) * b
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        """Return the product (degree is the sum of the operand degrees)."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at ``substitution``."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution ** i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest power first; zero terms are omitted."""
        # BUG FIX: the previous version referenced an undefined name for the
        # exponent and bound the accumulator to a throwaway variable.
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += ' + '
            else:
                polynomial += ' - '
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + 'x'
            else:
                polynomial += str(abs(self.coefficients[i])) + 'x^' + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative as a new Polynomial."""
        # BUG FIX: previously each term was assigned to a scalar instead of
        # the coefficient list, so the result was always all-zero.
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """Return the antiderivative, with integration constant ``constant``."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        """Two polynomials are equal iff degree and all coefficients match."""
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)


# Backward-compatible alias: the class was previously published under this
# obfuscated name (while its own methods constructed the then-undefined
# name `Polynomial` — the primary bug fixed above).
UpperCAmelCase_ = Polynomial
687
0
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowercase__ ( snake_case__ ): '''simple docstring''' def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=False , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=64 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , snake_case=2 , snake_case=2 , snake_case=2 , snake_case=2 , snake_case=4 , snake_case=1 , ) -> Optional[Any]: _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_input_mask _UpperCAmelCase = use_token_type_ids _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_act _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = type_vocab_size _UpperCAmelCase = type_sequence_label_size _UpperCAmelCase = initializer_range _UpperCAmelCase = num_labels _UpperCAmelCase = num_choices _UpperCAmelCase = scope 
_UpperCAmelCase = q_groups _UpperCAmelCase = k_groups _UpperCAmelCase = v_groups _UpperCAmelCase = post_attention_groups _UpperCAmelCase = intermediate_groups _UpperCAmelCase = output_groups def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = None if self.use_input_mask: _UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None if self.use_labels: _UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self ) -> str: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> int: _UpperCAmelCase = SqueezeBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _UpperCAmelCase = model(_UpperCAmelCase , _UpperCAmelCase ) _UpperCAmelCase = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , 
(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Dict: _UpperCAmelCase = SqueezeBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _UpperCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Union[str, Any]: _UpperCAmelCase = SqueezeBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _UpperCAmelCase = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = self.num_labels _UpperCAmelCase = SqueezeBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _UpperCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str: _UpperCAmelCase = self.num_labels _UpperCAmelCase = SqueezeBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _UpperCAmelCase = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self , snake_case , 
snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]: _UpperCAmelCase = self.num_choices _UpperCAmelCase = SqueezeBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _UpperCAmelCase = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = self.prepare_config_and_inputs() (_UpperCAmelCase) = config_and_inputs _UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ): '''simple docstring''' _UpperCAmelCase = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) _UpperCAmelCase = ( { 'feature-extraction': SqueezeBertModel, 'fill-mask': SqueezeBertForMaskedLM, 'question-answering': SqueezeBertForQuestionAnswering, 'text-classification': SqueezeBertForSequenceClassification, 'token-classification': SqueezeBertForTokenClassification, 'zero-shot': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase = False _UpperCAmelCase = True _UpperCAmelCase = False def lowerCamelCase_ ( self ) -> Dict: _UpperCAmelCase = SqueezeBertModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=_UpperCAmelCase , dim=37 ) def lowerCamelCase_ ( self ) -> List[str]: self.config_tester.run_common_tests() def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_squeezebert_model(*_UpperCAmelCase ) def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*_UpperCAmelCase ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*_UpperCAmelCase ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_UpperCAmelCase ) def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*_UpperCAmelCase ) def lowerCamelCase_ ( self ) -> Union[str, Any]: _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_UpperCAmelCase ) @slow def lowerCamelCase_ ( self ) -> Dict: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase = SqueezeBertModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) @require_sentencepiece @require_tokenizers @require_torch class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCamelCase_ ( self ) -> Optional[int]: _UpperCAmelCase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' ) _UpperCAmelCase = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]] ) _UpperCAmelCase = model(_UpperCAmelCase )[0] _UpperCAmelCase = torch.Size((1, 3) ) self.assertEqual(output.shape , _UpperCAmelCase ) _UpperCAmelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=1E-4 ) )
573
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Import structure handed to _LazyModule: submodule name -> public symbols.
# BUG FIX: previously every list was bound to a throwaway name `a`, so the
# optional torch/flax symbols never reached the lazy module, the name
# `_import_structure` passed to _LazyModule was undefined, and the resulting
# lazy module was never installed in sys.modules.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is absent: simply omit the torch-backed symbols.
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # flax is absent: omit the flax-backed symbols.
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports each
    # submodule only when one of its attributes is first accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
0
def __lowerCAmelCase(sentence: str, ngram_size: int) -> list[str]:
    """Return every contiguous character n-gram of length *ngram_size* in *sentence*.

    >>> create_ngram("abcde", 3)
    ['abc', 'bcd', 'cde']
    """
    # BUG FIX: the previous signature declared the parameter name `A_` twice
    # (a SyntaxError) and the body mixed `A_` with undefined names.
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]


# Public alias: the mangled dunder-prefixed name above is hidden from
# star-imports, so expose a readable entry point as well.
create_ngram = __lowerCAmelCase

if __name__ == "__main__":
    from doctest import testmod

    testmod()
221
from __future__ import annotations

from decimal import Decimal
from math import *  # noqa: F403

from sympy import diff


def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of ``func`` — an expression in the variable ``x`` — near *a*.

    Iterates x_{n+1} = x_n - f(x_n)/f'(x_n) until |f(x_n)| < precision.
    The derivative is obtained symbolically via ``sympy.diff``.

    Args:
        func: expression string such as ``'sin(x)'``; evaluated with eval(),
            so it must come from a trusted source.
        a: starting guess.
        precision: residual threshold for convergence.

    Returns:
        The root as a float.
    """
    # BUG FIX: the previous signature declared `__magic_name__` three times
    # (a SyntaxError), and the __main__ block below called the then-undefined
    # name `newton_raphson`; the function now carries its real name.
    x = a  # `x` is read by eval(func) and eval(str(diff(func))) below
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # Converged once the residual magnitude drops below `precision`.
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)


# Backward-compatible alias for the previous obfuscated public name.
UpperCamelCase_ = newton_raphson

if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find Square Root of 5
    print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
    # Exponential Roots
    print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
687
0
"""Tests for the Flax ALBERT models: a model tester, common Flax model tests,
and a slow integration test against the `albert-base-v2` checkpoint.

NOTE(review): this module appears machine-mangled and is documented as-is:
every method signature repeats the parameter name `UpperCAmelCase` (a
SyntaxError), every local is rebound to `lowercase` so later reads of names
like `config`, `model`, `output` are undefined, `_UpperCAmelCase` is
referenced but never bound, and all three classes share the name
`UpperCAmelCase_` (each shadows the previous).  The original identifiers
cannot be recovered with certainty from this chunk, so only comments and
docstrings are added.
"""
import unittest

import numpy as np

from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax.numpy as jnp

    from transformers.models.albert.modeling_flax_albert import (
        FlaxAlbertForMaskedLM,
        FlaxAlbertForMultipleChoice,
        FlaxAlbertForPreTraining,
        FlaxAlbertForQuestionAnswering,
        FlaxAlbertForSequenceClassification,
        FlaxAlbertForTokenClassification,
        FlaxAlbertModel,
    )


class UpperCAmelCase_ ( unittest.TestCase ):
    """Model tester: builds a small AlbertConfig plus random inputs for the
    common Flax model tests."""

    # NOTE(review): the parameters below all reuse the name `UpperCAmelCase`
    # (SyntaxError); the assignments in the body reveal the intended names
    # (parent, batch_size, seq_length, ...).
    def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : str=7 , UpperCAmelCase : Tuple=True , UpperCAmelCase : int=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Union[str, Any]=99 , UpperCAmelCase : List[str]=32 , UpperCAmelCase : List[Any]=5 , UpperCAmelCase : str=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Union[str, Any]="gelu" , UpperCAmelCase : int=0.1 , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : Any=512 , UpperCAmelCase : Optional[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : List[Any]=0.0_2 , UpperCAmelCase : Optional[int]=4 , ) -> str:
        '''Record the tester's hyperparameters (all locals rebound to `lowercase`).'''
        lowercase : Dict =parent
        lowercase : Union[str, Any] =batch_size
        lowercase : Tuple =seq_length
        lowercase : str =is_training
        lowercase : Dict =use_attention_mask
        lowercase : Union[str, Any] =use_token_type_ids
        lowercase : Optional[int] =use_labels
        lowercase : Union[str, Any] =vocab_size
        lowercase : Union[str, Any] =hidden_size
        lowercase : Optional[Any] =num_hidden_layers
        lowercase : List[str] =num_attention_heads
        lowercase : List[str] =intermediate_size
        lowercase : Dict =hidden_act
        lowercase : Optional[Any] =hidden_dropout_prob
        lowercase : Any =attention_probs_dropout_prob
        lowercase : str =max_position_embeddings
        lowercase : Tuple =type_vocab_size
        lowercase : Tuple =type_sequence_label_size
        lowercase : Dict =initializer_range
        lowercase : str =num_choices

    def A__ ( self : Any ) -> Optional[Any]:
        '''Build random input ids, optional attention mask / token type ids, and a config.'''
        lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : Tuple =None
        if self.use_attention_mask:
            lowercase : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Any =None
        if self.use_token_type_ids:
            lowercase : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase : Tuple =AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def A__ ( self : Tuple ) -> Union[str, Any]:
        '''Split prepared inputs into (config, inputs_dict) for the common tests.'''
        lowercase : Optional[Any] =self.prepare_config_and_inputs()
        lowercase : Tuple =config_and_inputs
        lowercase : Any ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


@require_flax
class UpperCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """Common Flax model test-suite wiring for ALBERT.

    NOTE(review): shadows the tester class above (same name); the base
    `snake_case__` is undefined in this view.
    """

    # NOTE(review): `FlaxAlbertForQuestionAnswering` is listed twice, matching
    # the upstream file this appears to derive from.
    UpperCamelCase_ = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def A__ ( self : Optional[Any] ) -> Dict:
        '''setUp: build the model tester.'''
        lowercase : int =FlaxAlbertModelTester(self )

    @slow
    def A__ ( self : Optional[int] ) -> Any:
        '''Smoke-test loading `albert-base-v2` with every model class and running a 1x1 input.'''
        for model_class_name in self.all_model_classes:
            lowercase : Tuple =model_class_name.from_pretrained('''albert-base-v2''' )
            lowercase : Tuple =model(np.ones((1, 1) ) )
            self.assertIsNotNone(_UpperCAmelCase )


@require_flax
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: hidden-state slice of `albert-base-v2` on a fixed input."""

    @slow
    def A__ ( self : int ) -> Optional[int]:
        lowercase : Union[str, Any] =FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
        lowercase : Optional[int] =np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        lowercase : int =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        lowercase : Optional[int] =model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0]
        lowercase : Union[str, Any] =(1, 11, 768)
        self.assertEqual(output.shape , _UpperCAmelCase )
        # Reference hidden-state values recorded from the released checkpoint.
        lowercase : Tuple =np.array(
            [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _UpperCAmelCase , atol=1e-4 ) )
94
"""Convert an OpenAI consistency-model checkpoint (``*.pt``) into a diffusers
``ConsistencyModelPipeline`` (``UNet2DModel`` + ``CMStochasticIterativeScheduler``).

Fixes over the previous revision: all functions/constants had been collapsed onto
single reused names (every ``def`` was ``UpperCamelCase_``, every constant ``a``),
so the script raised ``NameError`` at runtime and the converters populated locals
instead of ``new_checkpoint``.
"""

import argparse
import os

import torch

# U-Net hyper-parameters for the three released checkpoint families.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler hyper-parameters (consistency-distillation vs. consistency-training).
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    """argparse ``type=`` helper: parse common yes/no strings into a bool.

    Raises argparse.ArgumentTypeError for unrecognized values.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one OpenAI ResBlock's weights under ``old_prefix`` into diffusers
    ``ResnetBlock2D`` keys under ``new_prefix``. Mutates and returns ``new_checkpoint``.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        # Only blocks that change channel count have a 1x1 skip projection.
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    """Split a fused qkv attention block under ``old_prefix`` into diffusers
    ``Attention`` keys under ``new_prefix``. Mutates and returns ``new_checkpoint``.
    """
    # The OpenAI checkpoint stores q/k/v fused along dim 0; diffusers wants them separate.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # 1x1 convs become linear layers: drop the trailing spatial singleton dims.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Load an OpenAI consistency-model state dict from ``checkpoint_path`` and
    remap every key into the diffusers ``UNet2DModel`` layout described by
    ``unet_config``. Returns the remapped state dict.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1  # input_blocks.0 is conv_in, handled above
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # A channel change on the first resnet of a block implies a skip projection.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            # Every block but the last ends with a resnet downsampler.
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    # diffusers is only needed for the CLI entry point; importing it lazily keeps
    # the conversion helpers importable (and testable) without diffusers installed.
    from diffusers import (
        CMStochasticIterativeScheduler,
        ConsistencyModelPipeline,
        UNet2DModel,
    )

    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
687
0
"""Integration tests for loading datasets from the HF GCP mirror.

Restored from a revision in which all three functions were defined under one
reused name (``_UpperCAmelCase``) — Python's last-definition-wins meant the
first two were silently discarded — and the pytest functions had lost the
``tmp_path_factory`` / ``tmp_path`` parameter names that pytest uses for
fixture injection.
"""

import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path

# (dataset, config_name) pairs that are mirrored on the HF GCP bucket.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build ``parameterized.named_parameters`` entries from DATASETS_ON_HF_GCP.

    With ``with_config`` each (dataset, config) pair becomes one test case;
    otherwise only the distinct dataset names are used.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset}
            for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    # Filled in per-testcase by absl's parameterized decorator.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        """The dataset-info JSON for each mirrored config must be downloadable."""
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            datset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(datset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Preparing from the HF GCS mirror must yield a materialized dataset."""
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Streaming from the HF GCS mirror must yield an IterableDatasetDict with a train split."""
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
429
"""Tests for the ``check_copies`` utility.

Restored from a revision in which every method of the test class shared one
name (``SCREAMING_SNAKE_CASE__``): unittest discovered no ``test_*`` methods
and ``setUp``/``tearDown`` never ran.
"""

import os
import re
import shutil
import sys
import tempfile
import unittest

import black

git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
# NOTE(review): the exact line wrapping below was reconstructed from a
# whitespace-mangled revision — confirm against scheduling_ddpm.py.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        # Point check_copies at a scratch copy of the tree so tests can rewrite files.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Format ``class_code`` with black, write it to a scratch file, and check
        that ``check_copies.is_copy_consistent`` agrees (or fixes it in place when
        ``overwrite_result`` is given).
        """
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): the TargetVersion member was mangled ("PYaa") in the
        # previous revision — PY37 assumed; confirm against the original file.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # assertEqual, not assertTrue: the previous revision passed the
                # expected text as assertTrue's *message*, so it never compared.
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
687
0
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : """simple docstring""" def __init__( self : Dict , lowercase__ : Optional[Any] , lowercase__ : str=1_3 , lowercase__ : List[str]=7 , lowercase__ : List[Any]=True , lowercase__ : Dict=True , lowercase__ : Dict=True , lowercase__ : Union[str, Any]=True , lowercase__ : List[str]=True , lowercase__ : Any=False , lowercase__ : str=False , lowercase__ : Optional[Any]=False , lowercase__ : List[str]=2 , lowercase__ : List[Any]=9_9 , lowercase__ : Optional[int]=0 , lowercase__ : Optional[int]=3_2 , lowercase__ : int=5 , lowercase__ : int=4 , lowercase__ : Dict=0.1 , lowercase__ : str=0.1 , lowercase__ : List[Any]=5_1_2 , lowercase__ : str=2 , lowercase__ : Dict=0.0_2 , lowercase__ : Dict=2 , lowercase__ : Tuple=4 , lowercase__ : Any="last" , lowercase__ : Optional[Any]=True , lowercase__ : Optional[int]=None , lowercase__ : int=0 , ): __lowercase : Union[str, Any] = parent __lowercase : str = batch_size __lowercase : Tuple = seq_length __lowercase : Tuple = is_training __lowercase : Union[str, Any] = use_input_lengths __lowercase : Tuple = use_token_type_ids __lowercase : Tuple = use_labels __lowercase : Any = gelu_activation __lowercase : Dict = sinusoidal_embeddings __lowercase : Any = causal __lowercase : Optional[int] = 
asm __lowercase : List[str] = n_langs __lowercase : str = vocab_size __lowercase : Optional[Any] = n_special __lowercase : Tuple = hidden_size __lowercase : Tuple = num_hidden_layers __lowercase : int = num_attention_heads __lowercase : str = hidden_dropout_prob __lowercase : int = attention_probs_dropout_prob __lowercase : str = max_position_embeddings __lowercase : Optional[Any] = type_sequence_label_size __lowercase : Optional[Any] = initializer_range __lowercase : List[str] = num_labels __lowercase : Tuple = num_choices __lowercase : Any = summary_type __lowercase : List[str] = use_proj __lowercase : str = scope __lowercase : Dict = bos_token_id def snake_case ( self : List[Any] ): __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None if self.use_input_lengths: __lowercase : Union[str, Any] = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __lowercase : str = None if self.use_token_type_ids: __lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __lowercase : Union[str, Any] = None __lowercase : Optional[Any] = None __lowercase : Dict = None if self.use_labels: __lowercase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : List[str] = ids_tensor([self.batch_size] , 2 ).float() __lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : List[str] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def snake_case ( self : Optional[int] ): return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , 
n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def snake_case ( self : List[Any] , lowercase__ : Tuple , lowercase__ : Tuple , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Any , lowercase__ : Any , lowercase__ : int , ): __lowercase : Tuple = XLMModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : List[str] = model(_UpperCAmelCase , lengths=_UpperCAmelCase , langs=_UpperCAmelCase ) __lowercase : Optional[Any] = model(_UpperCAmelCase , langs=_UpperCAmelCase ) __lowercase : Tuple = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def snake_case ( self : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : int , lowercase__ : List[str] , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : Dict , ): __lowercase : List[Any] = XLMWithLMHeadModel(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : Any = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def snake_case ( self : Optional[Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : 
Optional[int] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , ): __lowercase : Tuple = XLMForQuestionAnsweringSimple(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : Dict = model(_UpperCAmelCase ) __lowercase : Optional[int] = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase ) __lowercase : Dict = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def snake_case ( self : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : Any , lowercase__ : int , ): __lowercase : Tuple = XLMForQuestionAnswering(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : str = model(_UpperCAmelCase ) __lowercase : Tuple = model( _UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , p_mask=_UpperCAmelCase , ) __lowercase : Optional[Any] = model( _UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , cls_index=_UpperCAmelCase , is_impossible=_UpperCAmelCase , ) (__lowercase ) : List[str] = result_with_labels.to_tuple() __lowercase : Union[str, Any] = model(_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase ) (__lowercase ) : Optional[Any] = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * 
model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def snake_case ( self : str , lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Any , lowercase__ : List[str] , lowercase__ : Dict , lowercase__ : List[str] , lowercase__ : str , lowercase__ : Optional[Any] , lowercase__ : Optional[int] , ): __lowercase : Dict = XLMForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : List[str] = model(_UpperCAmelCase ) __lowercase : Dict = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def snake_case ( self : Dict , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : str , lowercase__ : Any , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Tuple , ): __lowercase : Dict = self.num_labels __lowercase : str = XLMForTokenClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def snake_case ( self : Tuple , lowercase__ : str , lowercase__ : str , lowercase__ : Tuple , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[str] , lowercase__ : Any , lowercase__ : Optional[int] , lowercase__ : int , ): __lowercase : Optional[Any] = self.num_choices __lowercase : Union[str, Any] = XLMForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() __lowercase : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowercase : Optional[int] = 
# NOTE(review): this chunk begins mid-method of the model-tester class; the assignment
# target for the first expression lies above the visible window.
        token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __lowercase : Optional[int] = model(
            _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , )
        # multiple-choice head is expected to emit one logit per choice
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def snake_case ( self : List[str] ):
        # Packs (config, inputs_dict) for the shared ModelTesterMixin machinery.
        # NOTE(review): obfuscated locals — `config_and_inputs`, `input_ids`, etc. do not
        # match the assigned names; kept byte-identical.
        __lowercase : int = self.prepare_config_and_inputs()
        ( __lowercase ) : List[str] = config_and_inputs
        __lowercase : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict


@require_torch
class lowerCAmelCase__ ( snake_case__ , snake_case__ , snake_case__ , unittest.TestCase ):
    """Test suite wiring the XLM model family into the shared model/pipeline test mixins."""

    # all XLM task heads exercised by the common model tests (torch-only)
    __UpperCAmelCase : str = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    __UpperCAmelCase : List[Any] = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    # pipeline-task -> model-class mapping used by the pipeline test mixin
    __UpperCAmelCase : int = (
        {
            'feature-extraction': XLMModel,
            'fill-mask': XLMWithLMHeadModel,
            'question-answering': XLMForQuestionAnsweringSimple,
            'text-classification': XLMForSequenceClassification,
            'text-generation': XLMWithLMHeadModel,
            'token-classification': XLMForTokenClassification,
            'zero-shot': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def snake_case ( self : Any , lowercase__ : Union[str, Any] , lowercase__ : Dict , lowercase__ : Union[str, Any] , lowercase__ : List[Any] , lowercase__ : Optional[int] ):
        # Returns True when a pipeline test should be skipped for this model/tokenizer combo.
        # NOTE(review): body references `pipeline_test_casse_name`/`tokenizer_name`, not the
        # obfuscated parameter names — kept as-is.
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast" )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def snake_case ( self : int , lowercase__ : Optional[int] , lowercase__ : Any , lowercase__ : str=False ):
        # Augments the mixin-provided inputs with dummy QA labels when requested.
        __lowercase : Any = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )

        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __lowercase : str = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )
                __lowercase : Dict = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase )

        return inputs_dict

    def snake_case ( self : Union[str, Any] ):
        # setUp: build the model tester and the config tester.
        __lowercase : Optional[int] = XLMModelTester(self )
        __lowercase : Optional[Any] = ConfigTester(self , config_class=_UpperCAmelCase , emb_dim=3_7 )

    def snake_case ( self : Tuple ):
        self.config_tester.run_common_tests()

    def snake_case ( self : List[Any] ):
        __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*_UpperCAmelCase )

    def snake_case ( self : str ):
        __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*_UpperCAmelCase )

    def snake_case ( self : Optional[int] ):
        __lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*_UpperCAmelCase )

    def snake_case ( self : List[str] ):
        __lowercase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*_UpperCAmelCase )

    def snake_case ( self : Tuple ):
        __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*_UpperCAmelCase )

    def snake_case ( self : Optional[Any] ):
        __lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*_UpperCAmelCase )

    def snake_case ( self : Optional[Any] ):
        __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*_UpperCAmelCase )

    def snake_case ( self : Optional[int] , lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : str=False , lowercase__ : Any=1 ):
        # Shape-checks per-step attention tensors produced during generation:
        # one tuple of layer attentions per generated token.
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
        self.assertListEqual(
            [isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_attentions in attentions] , [True] * len(_UpperCAmelCase ) )
        self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_attentions in enumerate(_UpperCAmelCase ):
            # adds PAD dummy token
            __lowercase : Tuple = min_length + idx + 1
            __lowercase : Optional[int] = min_length + idx + 1

            __lowercase : Tuple = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_UpperCAmelCase ) )

    def snake_case ( self : Optional[int] , lowercase__ : Tuple , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : str , lowercase__ : str , lowercase__ : List[Any]=False , lowercase__ : List[str]=1 ):
        # Shape-checks per-step hidden-state tensors produced during generation.
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
        self.assertListEqual(
            [isinstance(_UpperCAmelCase , _UpperCAmelCase ) for iter_hidden_states in hidden_states] , [True] * len(_UpperCAmelCase ) , )
        self.assertEqual(len(_UpperCAmelCase ) , (max_length - min_length) * num_beam_groups )

        for idx, iter_hidden_states in enumerate(_UpperCAmelCase ):
            # adds PAD dummy token
            __lowercase : List[str] = min_length + idx + 1
            __lowercase : Union[str, Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_UpperCAmelCase ) , )
        pass

    @slow
    def snake_case ( self : Any ):
        # Smoke-test loading the first published checkpoint.
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase : int = XLMModel.from_pretrained(_UpperCAmelCase )
            self.assertIsNotNone(_UpperCAmelCase )


@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
    """Slow integration test: greedy generation on a pretrained XLM checkpoint."""

    @slow
    def snake_case ( self : str ):
        __lowercase : Optional[Any] = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
        model.to(_UpperCAmelCase )
        __lowercase : Optional[int] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=_UpperCAmelCase )  # the president
        __lowercase : List[str] = [
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
            1_4,
            4_4_7,
        ]  # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        __lowercase : Any = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _UpperCAmelCase )
575
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class UpperCAmelCase_ :
    """Training arguments for the CodeParrot causal-LM training script.

    NOTE(review): every class in this module is named ``UpperCAmelCase_`` and every
    field is named ``lowerCamelCase``, so later bindings shadow earlier ones, and
    ``snake_case__`` defaults are unresolved here — obfuscation artifacts kept as-is.
    """

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
    lowerCamelCase : Optional[str] = field(
        default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
    lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
    lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
    lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
    lowerCamelCase : Optional[int] = field(
        default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    # NOTE(review): help text typo — reads "fo" instead of "for"; runtime string left unchanged.
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate fo training.'} )
    # NOTE(review): help says "Learning rate." but the default ('cosine') is a scheduler name — confirm.
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate.'} )
    lowerCamelCase : Optional[int] = field(
        default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
    lowerCamelCase : Optional[int] = field(
        default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
    lowerCamelCase : Optional[bool] = field(
        default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
    lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
    lowerCamelCase : Optional[int] = field(
        default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
    lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
    lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
    lowerCamelCase : Optional[int] = field(
        default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
    lowerCamelCase : Optional[str] = field(
        default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
    lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )


@dataclass
class UpperCAmelCase_ :
    """Perplexity-evaluation arguments."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
    lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
    lowerCamelCase : Optional[int] = field(
        default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
    lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
    lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )


@dataclass
class UpperCAmelCase_ :
    """HumanEval code-generation / evaluation arguments."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
    lowerCamelCase : Optional[int] = field(
        default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
    lowerCamelCase : Optional[bool] = field(
        default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
    lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
    lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
    lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
    lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
    lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
    lowerCamelCase : Optional[int] = field(
        default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
    lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
    # NOTE(review): the help string below looks copy-pasted from the seed field — this
    # default is clearly an output file name; runtime string left unchanged.
    lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Random seed used for evaluation.'} )
    lowerCamelCase : Optional[str] = field(
        default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
    lowerCamelCase : Optional[int] = field(
        default=-1 , metadata={
            'help': (
                'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
                ' number corresponds to which GPU device id to run on.'
            )
        } , )


@dataclass
class UpperCAmelCase_ :
    """Dataset preprocessing / filtering arguments."""

    lowerCamelCase : Optional[int] = field(
        default=snake_case__ , metadata={
            'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
        } , )
    lowerCamelCase : Optional[str] = field(
        default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save processed processed dataset.'} )
    lowerCamelCase : Optional[int] = field(
        default=10_00_00 , metadata={'help': 'Number of files to save per JSON output file.'} )
    lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
    # NOTE(review): line-length thresholds are typed Optional[float] but carry int defaults — confirm intent.
    lowerCamelCase : Optional[float] = field(
        default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
    lowerCamelCase : Optional[float] = field(
        default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
    lowerCamelCase : Optional[float] = field(
        default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
    lowerCamelCase : Optional[float] = field(
        default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
    lowerCamelCase : Optional[float] = field(
        default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
    lowerCamelCase : Optional[bool] = field(
        default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
    lowerCamelCase : Optional[float] = field(
        default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )


@dataclass
class UpperCAmelCase_ :
    """Tokenizer-training arguments."""

    lowerCamelCase : Optional[str] = field(
        default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
    lowerCamelCase : Optional[str] = field(
        default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
    lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
    lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
    # NOTE(review): help string duplicated from the field above; 3_27_68 (= 32768) looks
    # like a vocabulary size, not an example count — confirm.
    lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Number of examples to train the tokenizer on.'} )
    lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
    lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )


@dataclass
class UpperCAmelCase_ :
    """Dataset pretokenization arguments."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
    lowerCamelCase : Optional[str] = field(
        default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
    lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )


@dataclass
class UpperCAmelCase_ :
    """Fresh-model initialization arguments."""

    lowerCamelCase : Optional[str] = field(
        default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
    lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
    lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
687
0
"""Playfair cipher.

Encrypts letter pairs (digraphs) against a 5x5 key table built from a keyword
(I and J share a cell). See https://en.wikipedia.org/wiki/Playfair_cipher#Description

Fix(review): the previous revision defined all five helpers under one shadowed
name while their bodies called ``chunker``/``prepare_input``/``generate_table``
(guaranteed NameError), and variable-name collisions (``for chara, chara in ...``,
``rowa == rowa``) destroyed the digraph logic. Restored the canonical
implementation; the old surviving module name is kept as an alias below.
"""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    """Yield successive ``size``-length tuples from ``seq``; the last chunk may be shorter."""
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Normalize text for Playfair: uppercase letters only, an 'X' inserted
    between doubled letters, and an 'X' appended if the length is odd."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        # Playfair cannot encrypt a doubled letter in one digraph — pad with X.
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:  # odd length: pad the trailing single letter
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    """Build the 5x5 key table (flattened row-major list of 25 letters, no 'J')."""
    # I and J share a cell: the alphabet simply omits J.
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    """Encrypt ``plaintext`` with the Playfair cipher under ``key``."""
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:  # same row: take the letter to the right (wrapping)
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:  # same column: take the letter below (wrapping)
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    """Decrypt Playfair ``ciphertext`` under ``key`` (returns the prepared
    plaintext, i.e. including any padding 'X' characters)."""
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:  # same row: take the letter to the left (wrapping)
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:  # same column: take the letter above (wrapping)
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle: swap columns
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext


# Backward-compatible alias: in the previous revision the repeatedly-shadowed
# module name ended up bound to the last definition (the decoder).
lowerCamelCase_ = decode
338
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class UpperCAmelCase_ (unittest.TestCase ):
    """Unit tests for the Bark processor: tokenizer round-trips and voice-preset handling."""

    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        # setUp: fixture values shared by the tests below.
        # NOTE(review): obfuscated locals — later methods read ``self.checkpoint``,
        # ``self.tmpdirname`` etc., which these assignments no longer bind; kept as-is.
        _lowerCAmelCase :List[str] = 'ylacombe/bark-small'
        _lowerCAmelCase :int = tempfile.mkdtemp()
        _lowerCAmelCase :List[str] = 'en_speaker_1'
        _lowerCAmelCase :Union[str, Any] = 'This is a test string'
        _lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json'
        _lowerCAmelCase :str = 'speaker_embeddings'

    def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ):
        # Helper: fresh tokenizer for the fixture checkpoint.
        return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # tearDown: drop the temp dir created in setUp.
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        # save_pretrained / from_pretrained round-trip preserves the tokenizer vocab.
        _lowerCAmelCase :List[Any] = self.get_tokenizer()
        _lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        _lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )

    @slow
    def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        # Round-trip with speaker embeddings plus extra tokenizer kwargs.
        _lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        _lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        _lowerCAmelCase :Any = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # Voice presets: accepted as an in-memory dict, an .npz file, or a hub name.
        _lowerCAmelCase :Tuple = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        _lowerCAmelCase :List[Any] = 35
        _lowerCAmelCase :Optional[int] = 2
        _lowerCAmelCase :Dict = 8
        _lowerCAmelCase :Dict = {
            'semantic_prompt': np.ones(_UpperCAmelCase ),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len) ),
        }
        # test providing already loaded voice_preset
        _lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
        _lowerCAmelCase :List[Any] = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
        # test loading voice preset from npz file
        _lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' )
        np.savez(_UpperCAmelCase , **_UpperCAmelCase )
        _lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase )
        _lowerCAmelCase :Optional[int] = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() )
        # test loading voice preset from the hub
        _lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )

    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        # Processor output must match direct tokenizer output (padded to max_length=256).
        _lowerCAmelCase :Tuple = self.get_tokenizer()
        _lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase )
        _lowerCAmelCase :List[Any] = processor(text=self.input_string )
        _lowerCAmelCase :List[str] = tokenizer(
            self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
687
0
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git
import datasets


# NOTE(review): all four module constants below are bound to the same obfuscated name
# `__UpperCAmelCase` (originally logger / _CITATION / _DESCRIPTION / _KWARGS_DESCRIPTION /
# CHECKPOINT_URLS), so later assignments shadow earlier ones and the class body's
# references to those original names are unresolved here — kept byte-identical.
__UpperCAmelCase = datasets.logging.get_logger(__name__)

__UpperCAmelCase = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'

__UpperCAmelCase = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'

__UpperCAmelCase = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'

# Download URLs for every published BLEURT checkpoint, keyed by config name.
__UpperCAmelCase = {
    'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
    'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
    'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
    'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
    'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
    'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
    'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
    'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
    'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
    'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a ( datasets.Metric ):
    """`datasets` Metric wrapper around the BLEURT learned text-generation metric."""

    def A ( self : Dict ):
        # _info: declares the metric's input features and reference links.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Value("""string""" , id="""sequence""" ),
                } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )

    def A ( self : List[Any] , UpperCAmelCase : Dict ):
        # _download_and_prepare: resolve the checkpoint name, fetch it, build the scorer.
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                """Using default BLEURT-Base checkpoint for sequence maximum length 128. """
                """You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').""" )
            lowerCAmelCase_ : str = 'bleurt-base-128'

        if self.config_name.lower() in CHECKPOINT_URLS:
            lowerCAmelCase_ : int = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            lowerCAmelCase_ : Dict = self.config_name.upper()
        else:
            raise KeyError(
                f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        lowerCAmelCase_ : Optional[Any] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        lowerCAmelCase_ : str = score.BleurtScorer(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) )

    def A ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] ):
        # _compute: score candidates against references with the loaded BLEURT model.
        lowerCAmelCase_ : Optional[int] = self.scorer.score(references=_UpperCAmelCase , candidates=_UpperCAmelCase )
        return {"scores": scores}
600
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# NOTE(review): both module constants are bound to the same obfuscated name `a`
# (originally the logger and the pretrained-config URL map) — kept byte-identical.
a = logging.get_logger(__name__)

a = {
    """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
    """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
    """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
    """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
    """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
    """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
    """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
    """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
    """bert-large-uncased-whole-word-masking""": (
        """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
    ),
    """bert-large-cased-whole-word-masking""": (
        """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
    ),
    """bert-large-uncased-whole-word-masking-finetuned-squad""": (
        """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
    ),
    """bert-large-cased-whole-word-masking-finetuned-squad""": (
        """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
    ),
    """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
    """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
    """bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
    """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
    """cl-tohoku/bert-base-japanese-whole-word-masking""": (
        """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
    ),
    """cl-tohoku/bert-base-japanese-char""": (
        """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
    ),
    """cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
        """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
    ),
    """TurkuNLP/bert-base-finnish-cased-v1""": (
        """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
    ),
    """TurkuNLP/bert-base-finnish-uncased-v1""": (
        """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
    ),
    """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class UpperCAmelCase_ (snake_case__ ):
    """BERT model configuration: stores the hyperparameters of a BERT architecture."""

    lowerCamelCase : int = 'bert'  # model_type identifier used by the Auto* machinery

    def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ):
        # NOTE(review): obfuscated parameters — the right-hand names below
        # (vocab_size, hidden_size, ...) no longer match the signature; kept as-is.
        super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase )

        _lowerCAmelCase :List[Any] = vocab_size
        _lowerCAmelCase :Tuple = hidden_size
        _lowerCAmelCase :Dict = num_hidden_layers
        _lowerCAmelCase :Optional[Any] = num_attention_heads
        _lowerCAmelCase :List[Any] = hidden_act
        _lowerCAmelCase :int = intermediate_size
        _lowerCAmelCase :Tuple = hidden_dropout_prob
        _lowerCAmelCase :Tuple = attention_probs_dropout_prob
        _lowerCAmelCase :List[Any] = max_position_embeddings
        _lowerCAmelCase :Dict = type_vocab_size
        _lowerCAmelCase :Any = initializer_range
        _lowerCAmelCase :int = layer_norm_eps
        _lowerCAmelCase :List[Any] = position_embedding_type
        _lowerCAmelCase :int = use_cache
        _lowerCAmelCase :Union[str, Any] = classifier_dropout


class UpperCAmelCase_ (snake_case__ ):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        # multiple-choice inputs carry an extra 'choice' axis between batch and sequence
        if self.task == "multiple-choice":
            _lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            _lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ] )
687
0
'''Image feature type for the `datasets` library.

Stores images as a pyarrow {"bytes", "path"} struct and (optionally) decodes
them back to `PIL.Image.Image` objects on access.
'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union

import numpy as np
import pyarrow as pa

from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict


if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType

# NOTE(review): all three module constants below are bound to the same obfuscated
# name `lowercase` (originally _IMAGE_COMPRESSION_FORMATS, the native byte-order
# marker, and the supported numpy dtypes), so later assignments shadow earlier
# ones — kept byte-identical.
lowercase = None
lowercase = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
lowercase = [
    np.dtype('|b1'),
    np.dtype('|u1'),
    np.dtype('<u2'),
    np.dtype('>u2'),
    np.dtype('<i2'),
    np.dtype('>i2'),
    np.dtype('<u4'),
    np.dtype('>u4'),
    np.dtype('<i4'),
    np.dtype('>i4'),
    np.dtype('<f4'),
    np.dtype('>f4'),
    np.dtype('<f8'),
    np.dtype('>f8'),
]


@dataclass
class __magic_name__ :
    """Image feature: encodes str paths / bytes / numpy arrays / PIL images into a
    {"bytes", "path"} struct and decodes back to PIL images when `decode` is True."""

    UpperCAmelCase = True
    UpperCAmelCase = None
    # Automatically constructed
    UpperCAmelCase = "PIL.Image.Image"
    UpperCAmelCase = pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    UpperCAmelCase = field(default="Image" , init=snake_case__ , repr=snake_case__ )

    def __call__( self) -> str:
        '''Return the underlying pyarrow storage type for this feature.'''
        return self.pa_type

    def lowerCAmelCase ( self , snake_case) -> int:
        '''Encode one example (str path, raw bytes, np.ndarray, PIL image, or an
        already-shaped {"path", "bytes"} dict) into the storage struct.'''
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')

        if isinstance(_UpperCAmelCase , _UpperCAmelCase):
            _UpperCAmelCase : Optional[int] = np.array(_UpperCAmelCase)

        if isinstance(_UpperCAmelCase , _UpperCAmelCase):
            return {"path": value, "bytes": None}
        elif isinstance(_UpperCAmelCase , _UpperCAmelCase):
            return {"path": None, "bytes": value}
        elif isinstance(_UpperCAmelCase , np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(_UpperCAmelCase)
        elif isinstance(_UpperCAmelCase , PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(_UpperCAmelCase)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def lowerCAmelCase ( self , snake_case , snake_case=None) -> List[str]:
        '''Decode a stored {"path", "bytes"} example into a PIL image, loading from a
        local path, a (possibly token-protected) remote URL, or in-memory bytes.'''
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')

        if token_per_repo_id is None:
            _UpperCAmelCase : Union[str, Any] = {}

        _UpperCAmelCase : Union[str, Any] = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(_UpperCAmelCase):
                    _UpperCAmelCase : Any = PIL.Image.open(_UpperCAmelCase)
                else:
                    # remote path: recover the hub repo id to look up an auth token
                    _UpperCAmelCase : str = path.split('::')[-1]
                    try:
                        _UpperCAmelCase : List[str] = string_to_dict(_UpperCAmelCase , config.HUB_DATASETS_URL)['repo_id']
                        _UpperCAmelCase : int = token_per_repo_id.get(_UpperCAmelCase)
                    except ValueError:
                        _UpperCAmelCase : List[str] = None
                    with xopen(_UpperCAmelCase , 'rb' , use_auth_token=_UpperCAmelCase) as f:
                        _UpperCAmelCase : Optional[int] = BytesIO(f.read())
                    _UpperCAmelCase : List[Any] = PIL.Image.open(bytes_)
        else:
            _UpperCAmelCase : Any = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def lowerCAmelCase ( self) -> Union[str, Any]:
        '''Flatten: expose the raw struct fields when decoding is disabled.'''
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary'),
                "path": Value('string'),
            }
        )

    def lowerCAmelCase ( self , snake_case) -> Optional[int]:
        '''Cast arbitrary pyarrow storage (string / binary / struct / list of arrays)
        to this feature's {"bytes", "path"} struct type.'''
        if pa.types.is_string(storage.type):
            _UpperCAmelCase : Optional[Any] = pa.array([None] * len(_UpperCAmelCase) , type=pa.binary())
            _UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            _UpperCAmelCase : Optional[Any] = pa.array([None] * len(_UpperCAmelCase) , type=pa.string())
            _UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                _UpperCAmelCase : List[str] = storage.field('bytes')
            else:
                _UpperCAmelCase : Optional[int] = pa.array([None] * len(_UpperCAmelCase) , type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                _UpperCAmelCase : Optional[int] = storage.field('path')
            else:
                _UpperCAmelCase : Optional[int] = pa.array([None] * len(_UpperCAmelCase) , type=pa.string())
            _UpperCAmelCase : Optional[int] = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            # list storage: treat each sub-list as a raw image array and serialize it
            _UpperCAmelCase : Tuple = pa.array(
                [encode_np_array(np.array(_UpperCAmelCase))['bytes'] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            _UpperCAmelCase : Any = pa.array([None] * len(_UpperCAmelCase) , type=pa.string())
            _UpperCAmelCase : Union[str, Any] = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
        return array_cast(_UpperCAmelCase , self.pa_type)

    def lowerCAmelCase ( self , snake_case) -> Dict:
        '''Embed: read every referenced file's bytes into storage so the table
        becomes self-contained (paths reduced to basenames).'''
        @no_op_if_value_is_null
        def path_to_bytes(snake_case):
            with xopen(_UpperCAmelCase , 'rb') as f:
                _UpperCAmelCase : Optional[int] = f.read()
            return bytes_

        _UpperCAmelCase : Union[str, Any] = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        _UpperCAmelCase : Dict = pa.array(
            [os.path.basename(_UpperCAmelCase) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
        _UpperCAmelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null())
        return array_cast(_UpperCAmelCase , self.pa_type)


def lowerCamelCase__ ( ):
    '''Return the image formats PIL can both open and save (cached in a module global).'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _UpperCAmelCase : Any = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS


def lowerCamelCase__ ( __lowerCamelCase : "PIL.Image.Image" ):
    '''Serialize a PIL image to bytes, keeping its original format when PIL can
    re-save it, otherwise falling back to PNG (simple modes) or TIFF.'''
    _UpperCAmelCase : int = BytesIO()
    if image.format in list_image_compression_formats():
        _UpperCAmelCase : Any = image.format
    else:
        _UpperCAmelCase : Dict = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(__lowerCamelCase , format=__lowerCamelCase )
    return buffer.getvalue()


def lowerCamelCase__ ( __lowerCamelCase : "PIL.Image.Image" ):
    '''Encode a PIL image as a {"path", "bytes"} dict, preferring its on-disk
    path when the image knows its filename.'''
    if hasattr(__lowerCamelCase , 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )}


def lowerCamelCase__ ( __lowerCamelCase : np.ndarray ):
    '''Encode a numpy array as image bytes (requires Pillow).'''
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.'
        # NOTE(review): SOURCE is truncated here mid-statement (unbalanced
        # parenthesis); the remainder of this function lies beyond the visible window.
) _UpperCAmelCase : List[Any] =array.dtype _UpperCAmelCase : Tuple =dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER _UpperCAmelCase : Union[str, Any] =dtype.kind _UpperCAmelCase : int =dtype.itemsize _UpperCAmelCase : List[str] =None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: _UpperCAmelCase : Union[str, Any] =np.dtype('|u1' ) if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." ) if dtype is not dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" ) # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: _UpperCAmelCase : List[str] =dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: _UpperCAmelCase : int =dtype_byteorder + dtype_kind + str(__lowerCamelCase ) _UpperCAmelCase : Union[str, Any] =np.dtype(__lowerCamelCase ) if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'" ) break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) _UpperCAmelCase : Dict =PIL.Image.fromarray(array.astype(__lowerCamelCase ) ) return {"path": None, "bytes": image_to_bytes(__lowerCamelCase )} def lowerCamelCase__ ( __lowerCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ): '''simple docstring''' if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError('To support encoding images, please install \'Pillow\'.' 
) if objs: _UpperCAmelCase : Optional[int] =first_non_null_value(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(__lowerCamelCase , np.ndarray ): _UpperCAmelCase : Any =no_op_if_value_is_null(__lowerCamelCase ) return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs] elif isinstance(__lowerCamelCase , PIL.Image.Image ): _UpperCAmelCase : Dict =no_op_if_value_is_null(__lowerCamelCase ) return [obj_to_image_dict_func(__lowerCamelCase ) for obj in objs] else: return objs else: return objs
446
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ): """simple docstring""" if isinstance(__magic_name__ , torch.Tensor ): return image elif isinstance(__magic_name__ , PIL.Image.Image ): _lowerCAmelCase :Tuple = [image] if isinstance(image[0] , PIL.Image.Image ): _lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] _lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 ) _lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0 _lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 ) _lowerCAmelCase :int = 2.0 * image - 1.0 _lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ ) elif isinstance(image[0] , torch.Tensor ): _lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 ) return image def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ): """simple docstring""" if not isinstance(__magic_name__ , np.ndarray ): _lowerCAmelCase :Tuple = True _lowerCAmelCase :str = va.device _lowerCAmelCase :List[str] = va.cpu().numpy() _lowerCAmelCase :List[str] = va.cpu().numpy() _lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) ) if np.abs(__magic_name__ ) > DOT_THRESHOLD: 
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va else: _lowerCAmelCase :int = np.arccos(__magic_name__ ) _lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ ) _lowerCAmelCase :Union[str, Any] = theta_a * t _lowerCAmelCase :str = np.sin(__magic_name__ ) _lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a _lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a _lowerCAmelCase :List[Any] = sa * va + sa * va if inputs_are_torch: _lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ ) return va def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ): """simple docstring""" _lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 ) _lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" for param in model.parameters(): _lowerCAmelCase :List[str] = value class UpperCAmelCase_ (snake_case__ ): """simple docstring""" def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ): super().__init__() self.register_modules( vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , ) _lowerCAmelCase :int = ( feature_extractor.size if isinstance(feature_extractor.size , _UpperCAmelCase ) else 
feature_extractor.size['shortest_edge'] ) _lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , _UpperCAmelCase ) set_requires_grad(self.clip_model , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): self.enable_attention_slicing(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any ): set_requires_grad(self.vae , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): set_requires_grad(self.vae , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any ): set_requires_grad(self.unet , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): set_requires_grad(self.unet , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ): # get the original timestep using init_timestep _lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase ) _lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 ) _lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ): if not isinstance(_UpperCAmelCase , torch.Tensor ): raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" ) _lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , 
dtype=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _lowerCAmelCase :List[Any] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase ) ] _lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 ) else: _lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents _lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 ) _lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase ) # get latents _lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :List[str] = init_latents return latents def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ): _lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ): _lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() _lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase ) _lowerCAmelCase :Dict = 
image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ): _lowerCAmelCase :Dict = latents.detach().requires_grad_() _lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep] _lowerCAmelCase :Optional[int] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase ) _lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , _UpperCAmelCase ): _lowerCAmelCase :Dict = self.scheduler.sigmas[index] _lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred else: raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample _lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample _lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase ) _lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype ) _lowerCAmelCase :List[Any] = 
self.clip_model.get_image_features(_UpperCAmelCase ) _lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase ) _lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale _lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0] if isinstance(self.scheduler , _UpperCAmelCase ): _lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2) _lowerCAmelCase :Dict = noise_pred_original else: _lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1: _lowerCAmelCase :int = [generator] + [None] * (batch_size - 1) _lowerCAmelCase :List[Any] = [ ('model', self.coca_model is None), ('tokenizer', 
self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] _lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]] _lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(_UpperCAmelCase ): raise ValueError( f"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase ) if style_prompt is None: if len(_UpperCAmelCase ): raise ValueError( f"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase ) # get prompt text embeddings for content and style _lowerCAmelCase :Any = self.tokenizer( _UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , ) _lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _lowerCAmelCase :int = self.tokenizer( _UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , ) _lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 ) # set timesteps _lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _lowerCAmelCase :Dict = {} if accepts_offset: _lowerCAmelCase :Optional[int] = 1 self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more 
optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device ) _lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase ) # Preprocess image _lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :int = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase ) _lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase ) _lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip_guidance_scale > 0: _lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Any = slerp( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
_lowerCAmelCase :int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1] _lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' ) _lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _lowerCAmelCase :Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to( self.device ) else: _lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _lowerCAmelCase :int = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _lowerCAmelCase :Any = {} if accepts_eta: _lowerCAmelCase :Any = eta # check if the scheduler accepts generator _lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _lowerCAmelCase :List[Any] = generator with self.progress_bar(total=_UpperCAmelCase ): for i, t in enumerate(_UpperCAmelCase ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample # perform classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 ) _lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _lowerCAmelCase :List[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents _lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample _lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 
1 ) _lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
687
0
"""Build an UnCLIPImageVariationPipeline from a pretrained txt2img UnCLIP checkpoint.

Fixes vs the previous version: the parser object was never bound (NameError),
``args.txtaimg_unclip`` did not match the declared ``--txt2img_unclip`` flag
(argparse exposes it as ``txt2img_unclip``), and the intermediate pipelines
were referenced under names that were never assigned.
"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # Load the text-to-image pipeline whose sub-modules are reused below.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Reassemble an image-variation pipeline from the txt2img components plus
    # the CLIP vision encoder (which replaces the text prior).
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
101
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal

# Quine-McCluskey boolean-function minimization.
# NOTE(review): recovered from name-mangled code in which every local binding had
# been renamed to the same placeholder (guaranteed NameErrors); names restored,
# control flow preserved exactly.


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two implicant strings that differ in at most one position.

    Returns the merged string with '_' at the differing position, or False
    when the strings differ in more than one position (not mergeable).

    >>> compare_string('0010', '0110')
    '0_10'
    >>> compare_string('0110', '1101')
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine implicants and collect the prime implicants.

    >>> check(['0.00.01.5'])
    ['0.00.01.5']
    """
    pi = []
    while True:
        # '$' marks terms that were never involved in a failed comparison.
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string (LSB computed first).

    >>> decimal_to_binary(3, [1, 5, 7])
    ['001', '101', '111']
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True if string1 (a prime implicant with '_' wildcards) covers
    string2, i.e. they differ in exactly `count` positions.

    >>> is_for_table('__1', '011', 2)
    True
    >>> is_for_table('01_', '001', 1)
    False
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart, then greedily
    cover the remaining minterms by the implicant covering the most columns.

    >>> selection([[1]], ['0.00.01.5'])
    ['0.00.01.5']
    """
    temp = []
    select = [0] * len(prime_implicants)
    # A column covered by exactly one implicant makes that implicant essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Take the essential implicants and zero out the columns they cover.
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedy cover for whatever is left.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff prime implicant i covers
    minterm j (they differ exactly at the implicant's '_' positions).

    >>> prime_implicant_chart(['0.00.01.5'], ['0.00.01.5'])
    [[1]]
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    """Interactive driver: read variable count and minterms, print implicants."""
    no_of_variable = int(input("Enter the no. of variables\n"))
    # float() kept for compatibility with the original behavior/doctests.
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
687
0
"""Tests for the ``# Copied from`` consistency checker in ``utils/check_copies.py``.

NOTE(review): recovered from name-mangled code — ``git_repo_path`` and
``REFERENCE_CODE`` were used but never bound, ``self.diffusers_dir`` was never
assigned in ``setUp``, and ``black.TargetVersion.PYaa`` was a mangled ``PY37``.
"""
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = " \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        # Work inside a scratch copy of the repo layout so the checker may
        # rewrite files without touching the real sources.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Remove only the scratch directory created in setUp. (The previous
        # version re-pointed the attribute at 'src/diffusers' before deleting,
        # which would have removed the real source tree.)
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under the given `# Copied from` comment and run the checker.

        When `overwrite_result` is given, the checker is run in overwrite mode and
        the rewritten file is expected to match the overwrite result.
        """
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # NOTE(review): upstream used assertTrue here (only checks the file
                # is non-empty); kept as-is since black-formatted output need not
                # equal `expected` byte-for-byte.
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            # NOTE(review): upstream substitutes "Bert", which does not occur in
            # REFERENCE_CODE (a no-op); preserved for fidelity.
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
56
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


# The original text bound all three module constants to the same name `a`,
# which left `_CITATION`, `_DESCRIPTION` and `_KWARGS_DESCRIPTION` — the names
# actually referenced by the decorator and `_info` below — undefined (NameError
# at class-creation time). The distinct names are restored here.
_CITATION = """\
@INPROCEEDINGS{Papineni02bleu:a,
    author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
    title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
    booktitle = {},
    year = {2002},
    pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
    title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",
    author = \"Lin, Chin-Yew and Och, Franz Josef\",
    booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",
    month = \"aug 23{--}aug 27\",
    year = \"2004\",
    address = \"Geneva, Switzerland\",
    publisher = \"COLING\",
    url = \"https://www.aclweb.org/anthology/C04-1072\",
    pages = \"501--507\",
}
"""

_DESCRIPTION = """\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\"
– this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics.

Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed].

BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts.
Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations.
For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score.
"""

_KWARGS_DESCRIPTION = """
Computes BLEU score of translated segments against one or more references.
Args:
    predictions: list of translations to score.
        Each translation should be tokenized into a list of tokens.
    references: list of lists of references for each translation.
        Each reference should be tokenized into a list of tokens.
    max_order: Maximum n-gram order to use when computing BLEU score.
    smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
    'bleu': bleu score,
    'precisions': geometric mean of n-gram precisions,
    'brevity_penalty': brevity penalty,
    'length_ratio': ratio of lengths,
    'translation_length': translation_length,
    'reference_length': reference_length
Examples:

    >>> predictions = [
    ...     [\"hello\", \"there\", \"general\", \"kenobi\"],  # tokenized prediction of the first sample
    ...     [\"foo\", \"bar\", \"foobar\"]  # tokenized prediction of the second sample
    ... ]
    >>> references = [
    ...     [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]],  # tokenized references for the first sample (2 references)
    ...     [[\"foo\", \"bar\", \"foobar\"]]  # tokenized references for the second sample (1 reference)
    ... ]
    >>> bleu = datasets.load_metric(\"bleu\")
    >>> results = bleu.compute(predictions=predictions, references=references)
    >>> print(results[\"bleu\"])
    1.0
"""

# Backwards-compatible: the last module-level assignment in the original bound
# the kwargs-description string to `a`, so keep that attribute alive.
a = _KWARGS_DESCRIPTION


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """``datasets.Metric`` wrapper around the TensorFlow-NMT BLEU implementation."""

    # Restored hook name: `datasets.Metric` dispatches to `_info`; the original
    # named both methods `SCREAMING_SNAKE_CASE__`, so the second overwrote the
    # first.
    def _info(self):
        """Describe the metric: citation, docs, and the expected feature schema
        (token sequences for predictions, nested token sequences for the
        possibly-multiple references per sample)."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string', id='token'), id='sequence'),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string', id='token'), id='sequence'), id='references'
                    ),
                }
            ),
            codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'],
            reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ],
        )

    # Restored hook name and parameter names: the original signature repeated
    # `_UpperCAmelCase` four times (a SyntaxError); the intended names follow
    # from the keyword arguments passed to `compute_bleu` below.
    def _compute(self, predictions, references, max_order=4, smooth=False):
        """Compute corpus-level BLEU for tokenized ``predictions`` against
        ``references`` and unpack the tuple returned by ``compute_bleu``."""
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
687
0
# Deprecation shim: this module's contents moved to `utils.memory`.
import warnings

# Warn once at import time so downstream users migrate to the new location.
# Fixed: the message previously said `find_executable_batchsize` (missing
# underscore), contradicting the import statement it recommends.
warnings.warn(
    'memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: '
    '`from accelerate import find_executable_batch_size` to avoid this warning.',
    FutureWarning,
)
485
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Fixed: the original bound both this dict and the torch-only model list to the
# same variable `a`, which clobbered the dict and left `_import_structure`
# (referenced by `_LazyModule` below) undefined — a NameError on import.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch is absent: expose only the configuration objects.
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports the
    # submodules on first attribute access. Fixed: the original assigned the
    # proxy to a local `a` instead of installing it in `sys.modules`, so lazy
    # loading never took effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
0
"""simple docstring""" import os import sys import unittest lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) lowercase = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') lowercase = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class lowercase__ ( unittest.TestCase ): '''simple docstring''' def lowerCamelCase_ ( self ) -> str: _UpperCAmelCase = get_test_to_tester_mapping(_UpperCAmelCase ) _UpperCAmelCase = get_test_to_tester_mapping(_UpperCAmelCase ) _UpperCAmelCase = {'BertModelTest': 'BertModelTester'} _UpperCAmelCase = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCamelCase_ ( self ) -> Tuple: _UpperCAmelCase = get_model_to_test_mapping(_UpperCAmelCase ) _UpperCAmelCase = get_model_to_test_mapping(_UpperCAmelCase ) _UpperCAmelCase = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } _UpperCAmelCase = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': 
['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) def lowerCamelCase_ ( self ) -> List[str]: _UpperCAmelCase = get_model_to_tester_mapping(_UpperCAmelCase ) _UpperCAmelCase = get_model_to_tester_mapping(_UpperCAmelCase ) _UpperCAmelCase = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } _UpperCAmelCase = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase ) self.assertEqual(get_test_info.to_json(_UpperCAmelCase ) , _UpperCAmelCase )
573
"""Unit tests for the MobileViT image processor (resize / center-crop / BGR flip)."""

import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class UpperCAmelCase_(unittest.TestCase):
    """Holds the default image-processor kwargs used by the test class below.

    NOTE(review): every parameter of ``__init__`` below is named
    ``_UpperCAmelCase`` — duplicate argument names are a SyntaxError, so this
    module cannot be imported as written. The body reads ``size``,
    ``crop_size``, ``parent``, ``batch_size``, ``num_channels``,
    ``image_size``, ``min_resolution``, ``max_resolution``, ``do_resize``,
    ``do_center_crop`` and ``do_flip_channel_order``, which were presumably
    the intended parameter names; confirm against repository history.
    """

    def __init__(self: str, _UpperCAmelCase: str, _UpperCAmelCase: Optional[int]=7, _UpperCAmelCase: Union[str, Any]=3, _UpperCAmelCase: int=18, _UpperCAmelCase: List[Any]=30, _UpperCAmelCase: List[Any]=400, _UpperCAmelCase: Optional[Any]=True, _UpperCAmelCase: Any=None, _UpperCAmelCase: Any=True, _UpperCAmelCase: int=None, _UpperCAmelCase: Union[str, Any]=True, ):
        # Defaults mirror the processor's own defaults when not overridden.
        _lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
        _lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        _lowerCAmelCase :str = parent
        _lowerCAmelCase :List[Any] = batch_size
        _lowerCAmelCase :Optional[Any] = num_channels
        _lowerCAmelCase :Optional[Any] = image_size
        _lowerCAmelCase :int = min_resolution
        _lowerCAmelCase :List[str] = max_resolution
        _lowerCAmelCase :List[str] = do_resize
        _lowerCAmelCase :Optional[int] = size
        _lowerCAmelCase :str = do_center_crop
        _lowerCAmelCase :int = crop_size
        _lowerCAmelCase :Optional[int] = do_flip_channel_order

    def SCREAMING_SNAKE_CASE__(self: List[Any]):
        # kwargs dict fed to MobileViTImageProcessor(**...) by the tests.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class UpperCAmelCase_(snake_case__, unittest.TestCase):
    """Exercises MobileViTImageProcessor on PIL, numpy and torch inputs.

    NOTE(review): the base class ``snake_case__`` is never defined — the
    imported ``ImageProcessingSavingTestMixin`` is the likely intended base.
    Both classes in this module share the name ``UpperCAmelCase_`` (the second
    shadows the first), and every method below is named
    ``SCREAMING_SNAKE_CASE__``, so only the last definition survives on the
    class; ``MobileViTImageProcessingTester`` in the first method is likewise
    undefined. Confirm the intended names against repository history.
    """

    lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__(self: Optional[Any]):
        # setUp-style hook: builds the shared tester fixture.
        _lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self)

    @property
    def SCREAMING_SNAKE_CASE__(self: str):
        # Convenience accessor for the default processor kwargs.
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__(self: Union[str, Any]):
        # The processor must expose all configuration attributes.
        _lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(_UpperCAmelCase, 'do_resize'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'size'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'do_center_crop'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'center_crop'))
        self.assertTrue(hasattr(_UpperCAmelCase, 'do_flip_channel_order'))

    def SCREAMING_SNAKE_CASE__(self: Any):
        # from_dict must honor both stored values and explicit overrides.
        _lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})
        _lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def SCREAMING_SNAKE_CASE__(self: List[Any]):
        # Intentionally empty placeholder.
        pass

    def SCREAMING_SNAKE_CASE__(self: int):
        # Initialize image_processing
        _lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        _lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase, Image.Image)
        # Test not batched input
        _lowerCAmelCase :Optional[int] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        _lowerCAmelCase :str = image_processing(_UpperCAmelCase, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def SCREAMING_SNAKE_CASE__(self: Tuple):
        # Initialize image_processing
        _lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        _lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, numpify=_UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase, np.ndarray)
        # Test not batched input
        _lowerCAmelCase :List[str] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        _lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def SCREAMING_SNAKE_CASE__(self: Any):
        # Initialize image_processing
        _lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        _lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, torchify=_UpperCAmelCase)
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase, torch.Tensor)
        # Test not batched input
        _lowerCAmelCase :List[str] = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
        # Test batched
        _lowerCAmelCase :int = image_processing(_UpperCAmelCase, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
687
0
def __lowerCAmelCase(A_: int = 4_00_00_00) -> int:
    """Project Euler 2: sum of the even-valued Fibonacci terms not exceeding ``A_``.

    :param A_: inclusive upper bound on the Fibonacci values (default 4,000,000).
    :return: the sum of every even Fibonacci number <= ``A_``.
    """
    total = 0
    # Walk the sequence iteratively — no list needs to be materialized, which
    # also removes the original's fragile index/break bookkeeping.
    current, nxt = 0, 1
    while current <= A_:
        if current % 2 == 0:
            total += current
        current, nxt = nxt, current + nxt
    return total


# Fixed: the __main__ guard called `solution()`, which was never defined —
# keep that spelling working via a public alias.
solution = __lowerCAmelCase

if __name__ == "__main__":
    print(F"{solution() = }")
221
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for the pickled-``pandas.DataFrame`` builder below.

    Restored names: the original declared this class with the builder's name
    and its only field as ``lowerCamelCase``, while the builder referenced
    ``PandasConfig`` and read ``self.config.features`` — both NameErrors.
    """

    # Optional fixed output schema; when set, every table is cast to it.
    features: Optional[datasets.Features] = None


class UpperCAmelCase_(datasets.ArrowBasedBuilder):
    """Loads pickled ``pandas.DataFrame`` files into Arrow tables.

    SECURITY NOTE: ``pd.read_pickle`` executes arbitrary code embedded in the
    pickle — only use this builder on trusted data files.
    """

    # Restored attribute name required by the datasets builder machinery.
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        """Dataset metadata: just the (optional) feature schema."""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/extract ``data_files`` and emit one SplitGenerator per split.

        A bare str/list/tuple of files becomes a single TRAIN split; a mapping
        yields one split per key.
        """
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a table to the configured features, if any."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (index, Arrow table) pairs, one per pickled DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
687
0
"""Project Euler 13: first ten digits of the sum of the numbers in ``num.txt``."""
import os


def lowercase_() -> str:
    """Return the first ten digits (as a string) of the sum of the integers
    listed one-per-line in ``num.txt``.

    Fixed: the original referenced the undefined name ``__A`` in three places
    (path argument, open() argument, and the per-line int conversion) and was
    annotated as returning ``int`` although it returns a string slice.
    """
    # Resolve the data file relative to this module, not the current directory.
    file_path: str = os.path.join(os.path.dirname(__file__), '''num.txt''')
    with open(file_path) as file_hand:
        # Sum every line as an integer, then keep only the ten leading digits.
        return str(sum(int(line) for line in file_hand))[:1_0]


# Fixed: the __main__ guard called `solution()`, which was never defined.
solution = lowercase_

if __name__ == "__main__":
    print(solution())
94
"""YOLO-style dataset augmentation: horizontally/vertically flip images and
their bounding-box annotations, writing the flipped copies back out."""

import glob
import os
import random
from string import ascii_lowercase, digits

import cva


# NOTE(review): the three directory constants below are all assigned to the
# same name `a` (empty strings) and the flip-type flag to `a` as well, while
# the code reads `LABEL_DIR`, `IMG_DIR`, `OUTPUT_DIR` and `FLIP_TYPE` —
# none of which is ever defined. Presumably these were four distinct
# constants; confirm against repository history.
a = """"""
a = """"""
a = """"""
a = 1  # (0 is vertical, 1 is horizontal)


# NOTE(review): all four functions in this module are named `UpperCamelCase_`,
# so each definition overwrites the previous one; the bodies also read names
# (`paths`, `letter_code`, `file_root`, `annos_list`, `new_annos`, ...) that
# are never bound because every assignment target was rewritten to
# `_lowerCAmelCase`. The `cva.imwrite(f"/{file_root}.jpg", ...)` / `open(
# f"/{file_root}.txt", "w")` calls would also write to the filesystem ROOT —
# almost certainly `f"{file_root}.jpg"` was intended. Confirm before running.
def UpperCamelCase_():
    """Entry point: load the dataset, flip every image+annotation, save results."""
    _lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__, __magic_name__)
    print('Processing...')
    _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__, __magic_name__, __magic_name__)
    for index, image in enumerate(__magic_name__):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        _lowerCAmelCase :Optional[Any] = random_chars(32)
        _lowerCAmelCase :str = paths[index].split(os.sep)[-1].rsplit('.', 1)[0]
        _lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"""
        cva.imwrite(f"""/{file_root}.jpg""", __magic_name__, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""")
        _lowerCAmelCase :str = []
        for anno in new_annos[index]:
            # YOLO line format: "<class> <x_center> <y_center> <width> <height>".
            _lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"""
            annos_list.append(__magic_name__)
        with open(f"""/{file_root}.txt""", 'w') as outfile:
            outfile.write('\n'.join(line for line in annos_list))


# NOTE(review): duplicate parameter names (`__magic_name__` twice) are a
# SyntaxError — the intended parameters are presumably the label and image
# directories, given the glob/join calls below.
def UpperCamelCase_(__magic_name__: str, __magic_name__: str):
    """Collect (image path, boxes) pairs from a directory of YOLO .txt labels."""
    _lowerCAmelCase :int = []
    _lowerCAmelCase :Union[str, Any] = []
    for label_file in glob.glob(os.path.join(__magic_name__, '*.txt')):
        # Label basename (without extension) doubles as the image basename.
        _lowerCAmelCase :Optional[int] = label_file.split(os.sep)[-1].rsplit('.', 1)[0]
        with open(__magic_name__) as in_file:
            _lowerCAmelCase :Union[str, Any] = in_file.readlines()
        _lowerCAmelCase :List[Any] = os.path.join(__magic_name__, f"""{label_name}.jpg""")
        _lowerCAmelCase :Tuple = []
        for obj_list in obj_lists:
            _lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n').split(' ')
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        # Skip images that have no annotated objects.
        if not boxes:
            continue
        img_paths.append(__magic_name__)
        labels.append(__magic_name__)
    return img_paths, labels


# NOTE(review): three duplicate `__magic_name__` parameters — SyntaxError.
# The body suggests (img_list, anno_list, flip_type) was intended.
def UpperCamelCase_(__magic_name__: list, __magic_name__: list, __magic_name__: int = 1):
    """Flip each image and mirror its box centers (x for horizontal flips,
    y for vertical flips); returns new images, new annotations, and paths."""
    _lowerCAmelCase :str = []
    _lowerCAmelCase :Any = []
    _lowerCAmelCase :Optional[Any] = []
    for idx in range(len(__magic_name__)):
        _lowerCAmelCase :Optional[int] = []
        _lowerCAmelCase :Optional[Any] = img_list[idx]
        path_list.append(__magic_name__)
        _lowerCAmelCase :List[str] = anno_list[idx]
        _lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__)
        if flip_type == 1:
            # Horizontal flip: mirror the normalized x-center.
            _lowerCAmelCase :List[Any] = cva.flip(__magic_name__, __magic_name__)
            for bbox in img_annos:
                _lowerCAmelCase :List[Any] = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            # Vertical flip: mirror the normalized y-center.
            _lowerCAmelCase :List[str] = cva.flip(__magic_name__, __magic_name__)
            for bbox in img_annos:
                _lowerCAmelCase :List[str] = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(__magic_name__)
        new_imgs_list.append(__magic_name__)
    return new_imgs_list, new_annos_lists, path_list


def UpperCamelCase_(__magic_name__: int = 32):
    """Random lowercase+digit string used to uniquify output file names.

    NOTE(review): the assert reads `number_char`, which is not this function's
    parameter name — another casualty of the renaming.
    """
    assert number_char > 1, "The number of character should greater than 1"
    _lowerCAmelCase :str = ascii_lowercase + digits
    return "".join(random.choice(__magic_name__) for _ in range(__magic_name__))


if __name__ == "__main__":
    # NOTE(review): `main` is undefined — the entry point above was renamed.
    main()
    print("""DONE ✅""")
687
0
# Annotation fixed: `Optional` was never imported; this is just a str constant.
_lowerCamelCase : str = "Alexander Joslin"

import operator as op

from .stack import Stack


def _UpperCAmelCase(UpperCamelCase_: str) -> int:
    """Evaluate a fully parenthesised, single-digit infix expression using
    Dijkstra's two-stack algorithm.

    Restored names: the original read `equation`, `operand_stack`,
    `operator_stack`, `opr`, `operators` etc. without ever binding them; the
    names are recovered from those usage sites. Only single-digit operands are
    parsed; '(' and whitespace are simply ignored.

    :param UpperCamelCase_: expression such as "(5 + ((4 * 2) * (2 + 3)))".
    :return: the integer (or float, for '/') value of the expression.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in UpperCamelCase_:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', apply the top operator to the top two operands.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 was pushed first, so it is the LEFT operand ("num2 op num1").
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the single remaining operand is the expression's value.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {_UpperCAmelCase(equation)}")
429
"""CLIP-based NSFW safety checker: scores image embeddings against learned
"concept" and "special care" embeddings and flags offending images."""

import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel

from ...utils import logging


a = logging.get_logger(__name__)


# NOTE(review): duplicate parameter names (`__magic_name__` twice) are a
# SyntaxError, and the body reads `normalized_image_embeds` /
# `normalized_text_embeds` which are never bound — the assignment targets
# were rewritten to `_lowerCAmelCase`. Intended behavior (from the calls
# below) is a cosine-similarity matrix between two embedding batches.
def UpperCamelCase_(__magic_name__: Optional[int], __magic_name__: Union[str, Any]):
    """Cosine similarity between each image embedding and each concept embedding."""
    _lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__)
    _lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__)
    return torch.mm(__magic_name__, normalized_text_embeds.t())


class UpperCAmelCase_(snake_case__):
    """Safety-checker model.

    NOTE(review): the base class ``snake_case__`` is undefined — the imported
    ``PreTrainedModel`` is the likely intended base. The two scoring methods
    below are both named ``SCREAMING_SNAKE_CASE__`` (the second overwrites the
    first) and each repeats the parameter name ``_UpperCAmelCase`` — a
    SyntaxError. Bodies also read names (`pooled_output`, `image_embeds`,
    `special_cos_dist`, ...) that the renamed assignments never bind.
    """

    lowerCamelCase : str = CLIPConfig
    lowerCamelCase : Any = ['CLIPEncoderLayer']

    def __init__(self: Optional[int], _UpperCAmelCase: CLIPConfig):
        super().__init__(_UpperCAmelCase)
        # Vision tower + projection into the shared CLIP embedding space.
        _lowerCAmelCase :Any = CLIPVisionModel(config.vision_config)
        _lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=_UpperCAmelCase)
        # 17 generic "concept" embeddings and 3 "special care" embeddings,
        # each with a per-concept threshold weight; all frozen buffers.
        _lowerCAmelCase :int = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=_UpperCAmelCase)
        _lowerCAmelCase :Any = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=_UpperCAmelCase)
        _lowerCAmelCase :str = nn.Parameter(torch.ones(17), requires_grad=_UpperCAmelCase)
        _lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3), requires_grad=_UpperCAmelCase)

    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__(self: Optional[Any], _UpperCAmelCase: Optional[int], _UpperCAmelCase: Dict):
        # Numpy/per-image variant: builds a per-image report dict and a list of
        # booleans marking images that matched any bad concept.
        _lowerCAmelCase :str = self.vision_model(_UpperCAmelCase)[1]  # pooled_output
        _lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        _lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase, self.special_care_embeds).cpu().float().numpy()
        _lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase, self.concept_embeds).cpu().float().numpy()
        _lowerCAmelCase :str = []
        _lowerCAmelCase :List[Any] = image_embeds.shape[0]
        for i in range(_UpperCAmelCase):
            _lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            _lowerCAmelCase :List[Any] = 0.0
            for concept_idx in range(len(special_cos_dist[0])):
                _lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx]
                _lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item()
                # Score = similarity minus the concept's threshold (+ adjustment).
                _lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]})
                    # A "special care" hit lowers the bar for the generic concepts.
                    _lowerCAmelCase :Any = 0.0_1
            for concept_idx in range(len(cos_dist[0])):
                _lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx]
                _lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item()
                _lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(_UpperCAmelCase)
            result.append(_UpperCAmelCase)
        _lowerCAmelCase :Any = [len(res['bad_concepts']) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def SCREAMING_SNAKE_CASE__(self: str, _UpperCAmelCase: torch.FloatTensor, _UpperCAmelCase: torch.FloatTensor):
        # Vectorized torch variant of the same scoring, without per-image dicts.
        _lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase)[1]  # pooled_output
        _lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase)
        _lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase, self.special_care_embeds)
        _lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        _lowerCAmelCase :Any = 0.0
        _lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        _lowerCAmelCase :Tuple = torch.any(special_scores > 0, dim=1)
        _lowerCAmelCase :List[str] = special_care * 0.0_1
        _lowerCAmelCase :Any = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        _lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        _lowerCAmelCase :List[str] = torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
687
0
"""simple docstring""" import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNetaDConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import floats_tensor, is_xformers_available, skip_mps from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class lowerCAmelCase__ ( snake_case__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = VideoToVideoSDPipeline __UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {'image', 'width', 'height'} __UpperCAmelCase : Tuple = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {'image'} __UpperCAmelCase : int = PipelineTesterMixin.required_optional_params - {'latents'} __UpperCAmelCase : Any = False # No `output_type`. 
__UpperCAmelCase : str = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def snake_case ( self : Union[str, Any] ): torch.manual_seed(0 ) __lowercase : str = UNetaDConditionModel( block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=3_2 , attention_head_dim=4 , ) __lowercase : Union[str, Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=_UpperCAmelCase , set_alpha_to_one=_UpperCAmelCase , ) torch.manual_seed(0 ) __lowercase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) __lowercase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , ) __lowercase : int = CLIPTextModel(_UpperCAmelCase ) __lowercase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) __lowercase : str = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, } return components def snake_case ( self : List[str] , lowercase__ : Optional[int] , lowercase__ : List[Any]=0 ): # 3 frames __lowercase : int = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) if str(_UpperCAmelCase ).startswith("mps" ): __lowercase : List[str] = 
torch.manual_seed(_UpperCAmelCase ) else: __lowercase : Tuple = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) __lowercase : List[str] = { 'prompt': 'A painting of a squirrel eating a burger', 'video': video, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'pt', } return inputs def snake_case ( self : List[Any] ): __lowercase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator __lowercase : Optional[Any] = self.get_dummy_components() __lowercase : Union[str, Any] = VideoToVideoSDPipeline(**_UpperCAmelCase ) __lowercase : int = sd_pipe.to(_UpperCAmelCase ) sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase ) __lowercase : Any = self.get_dummy_inputs(_UpperCAmelCase ) __lowercase : Optional[int] = 'np' __lowercase : Optional[int] = sd_pipe(**_UpperCAmelCase ).frames __lowercase : str = frames[0][-3:, -3:, -1] assert frames[0].shape == (3_2, 3_2, 3) __lowercase : int = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def snake_case ( self : Optional[int] ): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_UpperCAmelCase , expected_max_diff=5e-3 ) @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def snake_case ( self : Any ): pass @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." ) def snake_case ( self : Dict ): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." 
) def snake_case ( self : List[Any] ): pass def snake_case ( self : Any ): return super().test_progress_bar() @slow @skip_mps class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def snake_case ( self : Tuple ): __lowercase : Optional[Any] = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL" , torch_dtype=torch.floataa ) pipe.enable_model_cpu_offload() # 10 frames __lowercase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) __lowercase : Tuple = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=_UpperCAmelCase ) __lowercase : Optional[int] = video.to("cuda" ) __lowercase : List[Any] = 'Spiderman is surfing' __lowercase : Any = pipe(_UpperCAmelCase , video=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=3 , output_type="pt" ).frames __lowercase : Dict = np.array([-1.0_4_5_8_9_8_4, -1.1_2_7_9_2_9_7, -0.9_6_6_3_0_8_6, -0.9_1_5_0_3_9_0_6, -0.7_5_0_9_7_6_5_6] ) assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
575
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a = 6_3_7_8_1_3_7.0 a = 6_3_5_6_7_5_2.3_1_4_2_4_5 a = 6_378_137 def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ): """simple docstring""" _lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) _lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS # Intermediate P and Q values _lowerCAmelCase :str = (b_lata + b_lata) / 2 _lowerCAmelCase :Tuple = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2) _lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2 _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2) _lowerCAmelCase :str = sin(sigma / 2 ) ** 2 _lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
687
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) class _snake_case ( snake_case__ ): """simple docstring""" a = 'timm_backbone' def __init__( self : Tuple , _A : str=None , _A : Optional[Any]=3 , _A : List[str]=True , _A : Union[str, Any]=True , _A : str=None , **_A : List[str] , ): """simple docstring""" super().__init__(**_UpperCAmelCase) _SCREAMING_SNAKE_CASE : List[Any] = backbone _SCREAMING_SNAKE_CASE : Dict = num_channels _SCREAMING_SNAKE_CASE : Union[str, Any] = features_only _SCREAMING_SNAKE_CASE : str = use_pretrained_backbone _SCREAMING_SNAKE_CASE : int = True _SCREAMING_SNAKE_CASE : Tuple = out_indices if out_indices is not None else (-1,)
338
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : Dict = 'encoder-decoder' lowerCamelCase : Optional[Any] = True def __init__( self: str , **_UpperCAmelCase: int ): super().__init__(**_UpperCAmelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' ) _lowerCAmelCase :Dict = encoder_config.pop('model_type' ) _lowerCAmelCase :str = kwargs.pop('decoder' ) _lowerCAmelCase :str = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Any = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ): logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) _lowerCAmelCase :Dict = True _lowerCAmelCase :List[str] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase :Optional[int] = self.encoder.to_dict() _lowerCAmelCase :Union[str, Any] = self.decoder.to_dict() _lowerCAmelCase :List[str] = self.__class__.model_type return output
687
0
from __future__ import annotations import unittest from transformers import LEDConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFLEDForConditionalGeneration, TFLEDModel @require_tf class __a : __snake_case : List[Any] = LEDConfig __snake_case : Tuple = {} __snake_case : int = 'gelu' def __init__( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Any=13 , UpperCAmelCase : str=7 , UpperCAmelCase : List[str]=True , UpperCAmelCase : List[str]=False , UpperCAmelCase : int=99 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : int=4 , UpperCAmelCase : Dict=37 , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : int=20 , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : str=1 , UpperCAmelCase : str=0 , UpperCAmelCase : Dict=4 , ): lowerCAmelCase_ : int = parent lowerCAmelCase_ : int = batch_size lowerCAmelCase_ : int = seq_length lowerCAmelCase_ : Any = is_training lowerCAmelCase_ : Tuple = use_labels lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : Optional[int] = hidden_size lowerCAmelCase_ : Any = num_hidden_layers lowerCAmelCase_ : List[Any] = num_attention_heads lowerCAmelCase_ : str = intermediate_size lowerCAmelCase_ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase_ : List[Any] = attention_probs_dropout_prob lowerCAmelCase_ : Any = max_position_embeddings lowerCAmelCase_ : Union[str, Any] = eos_token_id lowerCAmelCase_ : List[Any] = pad_token_id lowerCAmelCase_ : int = bos_token_id lowerCAmelCase_ : List[Any] = attention_window # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size # [num_attention_heads, encoder_seq_length, encoder_key_length], but 
TFLongformerSelfAttention # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1] # because its local attention only attends to `self.attention_window` and one before and one after lowerCAmelCase_ : int = self.attention_window + 2 # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for # the `test_attention_outputs` and `test_hidden_states_output` tests lowerCAmelCase_ : List[Any] = ( self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window ) def A ( self : str ): lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCAmelCase_ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCAmelCase_ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase_ : Optional[Any] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , ) lowerCAmelCase_ : Optional[int] = prepare_led_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase_ : Dict = tf.concat( [tf.zeros_like(_UpperCAmelCase )[:, :-1], tf.ones_like(_UpperCAmelCase )[:, -1:]] , axis=-1 , ) lowerCAmelCase_ : Any = global_attention_mask return config, inputs_dict def 
A ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ): lowerCAmelCase_ : Tuple = TFLEDModel(config=_UpperCAmelCase ).get_decoder() lowerCAmelCase_ : Any = inputs_dict['input_ids'] lowerCAmelCase_ : List[str] = input_ids[:1, :] lowerCAmelCase_ : Optional[Any] = inputs_dict['attention_mask'][:1, :] lowerCAmelCase_ : Dict = 1 # first forward pass lowerCAmelCase_ : Optional[Any] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , use_cache=_UpperCAmelCase ) lowerCAmelCase_ : int = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCAmelCase_ : Dict = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCAmelCase_ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCAmelCase_ : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCAmelCase_ : Optional[int] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )[0] lowerCAmelCase_ : List[str] = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCAmelCase_ : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCAmelCase_ : Optional[int] = output_from_no_past[:, -3:, random_slice_idx] lowerCAmelCase_ : str = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(_UpperCAmelCase , _UpperCAmelCase , rtol=1e-3 ) def __UpperCamelCase ( lowercase__ : List[str] , lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any]=None , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : Union[str, Any]=None , ) -> List[str]: '''simple docstring''' if attention_mask is None: lowerCAmelCase_ : Tuple = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id 
) , tf.inta ) if decoder_attention_mask is None: lowerCAmelCase_ : List[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCAmelCase_ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCAmelCase_ : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class __a ( snake_case__ ,snake_case__ ,unittest.TestCase ): __snake_case : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else () __snake_case : Optional[Any] = (TFLEDForConditionalGeneration,) if is_tf_available() else () __snake_case : List[str] = ( { 'conversational': TFLEDForConditionalGeneration, 'feature-extraction': TFLEDModel, 'summarization': TFLEDForConditionalGeneration, 'text2text-generation': TFLEDForConditionalGeneration, 'translation': TFLEDForConditionalGeneration, } if is_tf_available() else {} ) __snake_case : int = True __snake_case : Tuple = False __snake_case : Optional[Any] = False __snake_case : Any = False def A ( self : Union[str, Any] ): lowerCAmelCase_ : List[Any] = TFLEDModelTester(self ) lowerCAmelCase_ : List[str] = ConfigTester(self , config_class=_UpperCAmelCase ) def A ( self : str ): self.config_tester.run_common_tests() def A ( self : Optional[int] ): lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*_UpperCAmelCase ) def A ( self : Union[str, Any] ): lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase_ : Optional[Any] = 
tf.zeros_like(inputs_dict["""attention_mask"""] ) lowerCAmelCase_ : Union[str, Any] = 2 lowerCAmelCase_ : Any = tf.where( tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , ) lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : str = self.model_tester.seq_length lowerCAmelCase_ : List[Any] = self.model_tester.encoder_seq_length def check_decoder_attentions_output(UpperCAmelCase : Dict ): lowerCAmelCase_ : str = outputs.decoder_attentions self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) def check_encoder_attentions_output(UpperCAmelCase : Dict ): lowerCAmelCase_ : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions] lowerCAmelCase_ : Union[str, Any] = [t.numpy() for t in outputs.encoder_global_attentions] self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , ) self.assertListEqual( list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , ) for model_class in self.all_model_classes: lowerCAmelCase_ : int = True lowerCAmelCase_ : Tuple = False lowerCAmelCase_ : Union[str, Any] = False lowerCAmelCase_ : int = model_class(_UpperCAmelCase ) lowerCAmelCase_ : Union[str, Any] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) lowerCAmelCase_ : Tuple = len(_UpperCAmelCase ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) if self.is_encoder_decoder: lowerCAmelCase_ : str = model_class(_UpperCAmelCase ) lowerCAmelCase_ : str = 
model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_decoder_attentions_output(_UpperCAmelCase ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Optional[Any] = model_class(_UpperCAmelCase ) lowerCAmelCase_ : Tuple = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) # Check attention is always last and order is fine lowerCAmelCase_ : str = True lowerCAmelCase_ : Tuple = True lowerCAmelCase_ : Union[str, Any] = model_class(_UpperCAmelCase ) lowerCAmelCase_ : Optional[int] = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) ) self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase ) check_encoder_attentions_output(_UpperCAmelCase ) @unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" ) def A ( self : Tuple ): pass def A ( self : List[str] ): # TODO: Head-masking not yet implement pass def __UpperCamelCase ( lowercase__ : str ) -> Optional[int]: '''simple docstring''' return tf.constant(lowercase__ , dtype=tf.intaa ) __UpperCAmelCase = 1e-4 @slow @require_tf class __a ( unittest.TestCase ): def A ( self : Union[str, Any] ): lowerCAmelCase_ : List[str] = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ).led # change to intended input here lowerCAmelCase_ : Optional[Any] = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCAmelCase_ : Optional[int] = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCAmelCase_ : Any = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) 
lowerCAmelCase_ : Optional[int] = model(**_UpperCAmelCase )[0] lowerCAmelCase_ : int = (1, 10_24, 7_68) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here lowerCAmelCase_ : Dict = tf.convert_to_tensor( [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 ) def A ( self : int ): lowerCAmelCase_ : Any = TFLEDForConditionalGeneration.from_pretrained("""allenai/led-base-16384""" ) # change to intended input here lowerCAmelCase_ : List[Any] = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCAmelCase_ : Tuple = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] ) lowerCAmelCase_ : Tuple = prepare_led_inputs_dict(model.config , _UpperCAmelCase , _UpperCAmelCase ) lowerCAmelCase_ : Tuple = model(**_UpperCAmelCase )[0] lowerCAmelCase_ : Dict = (1, 10_24, model.config.vocab_size) self.assertEqual(output.shape , _UpperCAmelCase ) # change to expected output here lowerCAmelCase_ : List[str] = tf.convert_to_tensor( [[33.65_07, 6.4572, 16.80_89], [5.8739, -2.4238, 11.29_02], [-3.2139, -4.3149, 4.2783]] , ) tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-3 , rtol=1e-3 )
600
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ): _lowerCAmelCase :Optional[int] = parent _lowerCAmelCase :Dict = batch_size _lowerCAmelCase 
:Optional[Any] = image_size _lowerCAmelCase :Optional[Any] = patch_size _lowerCAmelCase :List[Any] = num_channels _lowerCAmelCase :Optional[int] = embed_dim _lowerCAmelCase :List[str] = hidden_sizes _lowerCAmelCase :Union[str, Any] = depths _lowerCAmelCase :int = num_heads _lowerCAmelCase :Any = window_size _lowerCAmelCase :List[Any] = mlp_ratio _lowerCAmelCase :Optional[int] = qkv_bias _lowerCAmelCase :Union[str, Any] = hidden_dropout_prob _lowerCAmelCase :Optional[int] = attention_probs_dropout_prob _lowerCAmelCase :Dict = drop_path_rate _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :Tuple = use_absolute_embeddings _lowerCAmelCase :Optional[int] = patch_norm _lowerCAmelCase :Optional[Any] = layer_norm_eps _lowerCAmelCase :Union[str, Any] = initializer_range _lowerCAmelCase :List[str] = is_training _lowerCAmelCase :str = scope _lowerCAmelCase :Optional[int] = use_labels _lowerCAmelCase :List[Any] = type_sequence_label_size _lowerCAmelCase :Union[str, Any] = encoder_stride _lowerCAmelCase :Optional[int] = out_features _lowerCAmelCase :List[str] = out_indices def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase :Dict = None if self.use_labels: _lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase :str = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self: int ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ): _lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowerCAmelCase :Optional[int] = None _lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Any = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify 
channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase :List[Any] = 1 _lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :int = model(_UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size _lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase :Optional[int] = 1 _lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = 
self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs _lowerCAmelCase :List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCamelCase : Optional[Any] = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = False lowerCamelCase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = FocalNetModelTester(self ) _lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): return def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def SCREAMING_SNAKE_CASE__ ( self: str ): pass def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase :Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Tuple = model_class(_UpperCAmelCase ) _lowerCAmelCase :Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase :int = [*signature.parameters.keys()] _lowerCAmelCase :List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _lowerCAmelCase :List[Any] = 
outputs.hidden_states _lowerCAmelCase :str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # FocalNet has a different seq_length _lowerCAmelCase :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _lowerCAmelCase :List[str] = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape _lowerCAmelCase :Optional[int] = ( reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Dict = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common() 
_lowerCAmelCase :str = 3 _lowerCAmelCase :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase :int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :List[str] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Union[str, Any] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) @slow def SCREAMING_SNAKE_CASE__ ( self: int ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase :str = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE__ ( self: Dict ): # TODO update organization 
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.default_image_processor _lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase :Dict = model(**_UpperCAmelCase ) # verify the logits _lowerCAmelCase :str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else () lowerCamelCase : str = FocalNetConfig lowerCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Any = FocalNetModelTester(self )
687
0
'''
Scrape Amazon India search results for a product and collect the title, link,
current price, rating, MRP and discount of each hit in a pandas DataFrame.
'''
from itertools import zip_longest

import requests
from bsa import BeautifulSoup
from pandas import DataFrame


def lowerCamelCase__ ( __lowerCamelCase : str = "laptop" ):
    '''Return a DataFrame of product data scraped from an Amazon.in search.

    Columns: product title, link, current price, rating, MRP and discount
    percentage. Fields that cannot be parsed fall back to placeholder values.

    NOTE(review): screen-scraping — depends on Amazon's current HTML layout
    and on a browser-like User-Agent; results may silently be empty when the
    page markup changes.
    '''
    # Fix: the original f-string interpolated an undefined name (`product`);
    # it must use the function's parameter.
    url = f"https://www.amazon.in/laptop/s?k={__lowerCamelCase}"
    header = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
        'Accept-Language': 'en-US, en;q=0.5',
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            'Product Title',
            'Product Link',
            'Current Price of the product',
            'Product Rating',
            'MRP of the product',
            'Discount',
        ]
    )
    # Loop through each search-result entry and store it in the dataframe.
    for item, _ in zip_longest(
        soup.find_all(
            'div',
            attrs={'class': 's-result-item', 'data-component-type': 's-search-result'},
        ),
        soup.find_all('div', attrs={'class': 'a-row a-size-base a-color-base'}),
    ):
        try:
            # NOTE(review): `.ha` looks like a mangled `.h2` (the result's
            # heading tag) — TODO confirm against the live page markup.
            product_title = item.ha.text
            product_link = 'https://www.amazon.in/' + item.ha.a['href']
            product_price = item.find('span', attrs={'class': 'a-offscreen'}).text
            try:
                product_rating = item.find('span', attrs={'class': 'a-icon-alt'}).text
            except AttributeError:
                product_rating = 'Not available'
            try:
                product_mrp = (
                    '₹'
                    + item.find(
                        'span', attrs={'class': 'a-price a-text-price'}
                    ).text.split('₹')[1]
                )
            except AttributeError:
                product_mrp = ''
            try:
                # Discount as a percentage of MRP.
                discount = float(
                    (
                        (
                            float(product_mrp.strip('₹').replace(',', ''))
                            - float(product_price.strip('₹').replace(',', ''))
                        )
                        / float(product_mrp.strip('₹').replace(',', ''))
                    )
                    * 100
                )
            except ValueError:
                discount = float('nan')
        except AttributeError:
            pass
        # Fix: the original assigned the row to an undefined local instead of
        # appending it to the dataframe.
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Reset placeholders so a parse failure on the next item does not
        # silently reuse the previous item's values.
        product_price = ' '
        product_mrp = ' '
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    lowercase = 'headphones'
    # Fix: the original called undefined names (`get_amazon_product_data`,
    # `product`); use the function and variable actually defined here.
    lowerCamelCase__(lowercase).to_csv(F"""Amazon Product Data for {lowercase}.csv""")
446
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a = HfApi() a = {} # fmt: off a = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) a = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) a = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) a = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) a = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) a = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 
0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) a = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) a = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) a = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) a = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) a = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, 
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) a = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) a = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) a = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) a = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on a = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): a = UNetaDModel.from_pretrained(local_checkpoint, 
subfolder="""unet""") else: a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
687
0
from collections import deque def a__ ( A__ ): SCREAMING_SNAKE_CASE_ : Dict = len(A__ ) SCREAMING_SNAKE_CASE_ : int = deque() SCREAMING_SNAKE_CASE_ : List[Any] = [False for _ in range(A__ )] SCREAMING_SNAKE_CASE_ : str = [-1 for _ in range(A__ )] SCREAMING_SNAKE_CASE_ : List[str] = index_of[:] def strong_connect(A__, A__, A__ ): SCREAMING_SNAKE_CASE_ : int = index # the number when this node is seen SCREAMING_SNAKE_CASE_ : Dict = index # lowest rank node reachable from here index += 1 stack.append(A__ ) SCREAMING_SNAKE_CASE_ : Dict = True for w in g[v]: if index_of[w] == -1: SCREAMING_SNAKE_CASE_ : Union[str, Any] = strong_connect(A__, A__, A__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: SCREAMING_SNAKE_CASE_ : Optional[Any] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] SCREAMING_SNAKE_CASE_ : Optional[int] = stack.pop() SCREAMING_SNAKE_CASE_ : Dict = False component.append(A__ ) while w != v: SCREAMING_SNAKE_CASE_ : Union[str, Any] = stack.pop() SCREAMING_SNAKE_CASE_ : str = False component.append(A__ ) components.append(A__ ) return index SCREAMING_SNAKE_CASE_ : Union[str, Any] = [] for v in range(A__ ): if index_of[v] == -1: strong_connect(A__, 0, A__ ) return components def a__ ( A__, A__ ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = [[] for _ in range(A__ )] for u, v in edges: g[u].append(A__ ) return g if __name__ == "__main__": # Test lowerCAmelCase__ : Any =7 lowerCAmelCase__ : int =[0, 0, 1, 2, 3, 3, 4, 4, 6] lowerCAmelCase__ : int =[1, 3, 2, 0, 1, 4, 5, 6, 5] lowerCAmelCase__ : List[Any] =[(u, v) for u, v in zip(source, target)] lowerCAmelCase__ : List[Any] =create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
101
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class UpperCAmelCase_ (unittest.TestCase ):
    """Unit tests for the summarization data helpers.

    Covers truncate_or_pad, process_story, build_mask and
    compute_token_type_ids from `.utils_summarization`.

    NOTE(review): this file's identifiers were machine-mangled — every test
    method shares the name SCREAMING_SNAKE_CASE__ (later defs shadow earlier
    ones) and most assignments bind throwaway locals instead of the names the
    assertions read; flagged inline below.
    """

    def SCREAMING_SNAKE_CASE__ ( self: int ):
        # NOTE(review): assigns 10 to a discarded local; later tests read
        # self.block_size, which is never set — this looks like a mangled
        # `self.block_size = 10` setUp. TODO confirm against the upstream file.
        _lowerCAmelCase :Optional[int] = 10

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # Shorter-than-block input should be right-padded with the pad value 0.
        _lowerCAmelCase :str = [1, 2, 3, 4]
        _lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        # NOTE(review): _UpperCAmelCase is not defined in this scope — the
        # mangling dropped the distinct input/expected names.
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self: int ):
        # Input exactly block_size long should pass through unchanged.
        _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        # Longer-than-block input should be truncated to block_size.
        _lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        _lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        # A story with no @highlight section should yield an empty summary list.
        _lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [] )

    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        # An empty story should yield empty story lines and empty summary lines.
        _lowerCAmelCase :Optional[int] = ''
        _lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [] )
        self.assertEqual(_UpperCAmelCase , [] )

    def SCREAMING_SNAKE_CASE__ ( self: str ):
        # A story with an @highlight marker splits into story sentences and
        # summary sentences (with terminal periods normalised).
        _lowerCAmelCase :Optional[Any] = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        _lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
        _lowerCAmelCase :Optional[Any] = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase :Optional[int] = ['It was the best of times.']
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )

    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        # No pad tokens: the attention mask is all ones.
        _lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
        _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
        # NOTE(review): `expected` is undefined here — mangled from the second
        # assignment above.
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )

    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        # Trailing pad tokens (23) should be masked out with zeros.
        _lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        _lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # Pad id 1 at the end is masked, even though 1 is a common token id.
        _lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )

    def SCREAMING_SNAKE_CASE__ ( self: str ):
        # Token type ids alternate segment membership at each separator
        # token (id 101).
        _lowerCAmelCase :List[str] = 101
        _lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        _lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        _lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
        np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
687
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") _a : Optional[int] = logging.getLogger(__name__) @dataclass class _lowercase : _SCREAMING_SNAKE_CASE : str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=snake_case__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=snake_case__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=snake_case__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) _SCREAMING_SNAKE_CASE : bool = field( default=snake_case__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) _SCREAMING_SNAKE_CASE : str = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) _SCREAMING_SNAKE_CASE : bool = field( default=snake_case__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private 
models)." ) } , ) @dataclass class _lowercase : _SCREAMING_SNAKE_CASE : Optional[str] = field(default=snake_case__ , metadata={"help": "The input training data file (a text file)."} ) _SCREAMING_SNAKE_CASE : Optional[str] = field( default=snake_case__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) _SCREAMING_SNAKE_CASE : bool = field( default=snake_case__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=snake_case__ , metadata={"help": "The number of processes to use for the preprocessing."} , ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=snake_case__ , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _SCREAMING_SNAKE_CASE : bool = field( default=snake_case__ , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=snake_case__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) _SCREAMING_SNAKE_CASE : Optional[int] = field( default=snake_case__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) } , ) def a ( self : Tuple ) -> Union[str, Any]: if self.train_file is not None: __snake_case = self.train_file.split('.' )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: __snake_case = self.validation_file.split('.' 
)[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class _lowercase : _SCREAMING_SNAKE_CASE : PreTrainedTokenizerBase _SCREAMING_SNAKE_CASE : Union[bool, str, PaddingStrategy] = True _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Optional[int] = None def __call__( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[Any]: __snake_case = 'label' if 'label' in features[0].keys() else 'labels' __snake_case = [feature.pop(_UpperCAmelCase ) for feature in features] __snake_case = len(_UpperCAmelCase ) __snake_case = len(features[0]['input_ids'] ) __snake_case = [ [{k: v[i] for k, v in feature.items()} for i in range(_UpperCAmelCase )] for feature in features ] __snake_case = list(chain(*_UpperCAmelCase ) ) __snake_case = self.tokenizer.pad( _UpperCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , ) # Un-flatten __snake_case = {k: v.view(_UpperCAmelCase , _UpperCAmelCase , -1 ) for k, v in batch.items()} # Add back labels __snake_case = torch.tensor(_UpperCAmelCase , dtype=torch.intaa ) return batch def _a () -> Union[str, Any]: """simple docstring""" __snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __snake_case = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry('run_swag' , lowercase__ , lowercase__ ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __snake_case = training_args.get_process_log_level() logger.setLevel(lowercase__ ) datasets.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.set_verbosity(lowercase__ ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. __snake_case = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __snake_case = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. 
set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: __snake_case = {} if data_args.train_file is not None: __snake_case = data_args.train_file if data_args.validation_file is not None: __snake_case = data_args.validation_file __snake_case = data_args.train_file.split('.' )[-1] __snake_case = load_dataset( lowercase__ , data_files=lowercase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. __snake_case = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__snake_case = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) __snake_case = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. __snake_case = [f'ending{i}' for i in range(4 )] __snake_case = 'sent1' __snake_case = 'sent2' if data_args.max_seq_length is None: __snake_case = tokenizer.model_max_length if max_seq_length > 1_0_2_4: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.' ) __snake_case = 1_0_2_4 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) __snake_case = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. 
def preprocess_function(lowercase__ : Optional[int] ): __snake_case = [[context] * 4 for context in examples[context_name]] __snake_case = examples[question_header_name] __snake_case = [ [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(lowercase__ ) ] # Flatten out __snake_case = list(chain(*lowercase__ ) ) __snake_case = list(chain(*lowercase__ ) ) # Tokenize __snake_case = tokenizer( lowercase__ , lowercase__ , truncation=lowercase__ , max_length=lowercase__ , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(lowercase__ ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) __snake_case = raw_datasets['train'] if data_args.max_train_samples is not None: __snake_case = min(len(lowercase__ ) , data_args.max_train_samples ) __snake_case = train_dataset.select(range(lowercase__ ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): __snake_case = train_dataset.map( lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) __snake_case = raw_datasets['validation'] if data_args.max_eval_samples is not None: __snake_case = min(len(lowercase__ ) , data_args.max_eval_samples ) __snake_case = eval_dataset.select(range(lowercase__ ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): __snake_case = eval_dataset.map( lowercase__ , batched=lowercase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator __snake_case = ( default_data_collator if data_args.pad_to_max_length else 
DataCollatorForMultipleChoice(tokenizer=lowercase__ , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(lowercase__ : str ): __snake_case = eval_predictions __snake_case = np.argmax(lowercase__ , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer __snake_case = Trainer( model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowercase__ , data_collator=lowercase__ , compute_metrics=lowercase__ , ) # Training if training_args.do_train: __snake_case = None if training_args.resume_from_checkpoint is not None: __snake_case = training_args.resume_from_checkpoint elif last_checkpoint is not None: __snake_case = last_checkpoint __snake_case = trainer.train(resume_from_checkpoint=lowercase__ ) trainer.save_model() # Saves the tokenizer too for easy upload __snake_case = train_result.metrics __snake_case = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ ) ) __snake_case = min(lowercase__ , len(lowercase__ ) ) trainer.log_metrics('train' , lowercase__ ) trainer.save_metrics('train' , lowercase__ ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) __snake_case = trainer.evaluate() __snake_case = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ ) __snake_case = min(lowercase__ , len(lowercase__ ) ) trainer.log_metrics('eval' , lowercase__ ) trainer.save_metrics('eval' , lowercase__ ) __snake_case = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**lowercase__ ) else: trainer.create_model_card(**lowercase__ ) def _a (lowercase__ : str ) -> Dict: """simple docstring""" 
main() if __name__ == "__main__": main()
56
def UpperCamelCase_( __magic_name__ : int ) -> bool:
    """Return True if *__magic_name__* is a perfect number.

    A perfect number equals the sum of its proper divisors
    (e.g. ``6 == 1 + 2 + 3``). Non-positive integers are never perfect.
    """
    # Fix: for number <= 0 the original summed an empty range, so
    # sum() == 0 made 0 compare equal to itself and be reported perfect.
    if __magic_name__ <= 0:
        return False
    return sum(i for i in range(1 , __magic_name__ // 2 + 1 ) if __magic_name__ % i == 0 ) == __magic_name__


if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    a = int(input("""Enter number: """).strip())
    # Fix: the original referenced undefined names (`number`, `perfect`);
    # use the variable and function actually defined here.
    print(F'''{a} is {'' if UpperCamelCase_(a) else 'not '}a Perfect Number.''')
687
0
from __future__ import annotations import math def _lowerCamelCase ( __A : int ) -> int: if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(__A ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True SCREAMING_SNAKE_CASE = [num for num in range(3, 100001, 2) if not is_prime(num)] def _lowerCamelCase ( __A : int ) -> Optional[Any]: if not isinstance(__A , __A ): raise ValueError('''n must be an integer''' ) if n <= 0: raise ValueError('''n must be >= 0''' ) _UpperCAmelCase : Optional[int] = [] for num in range(len(__A ) ): _UpperCAmelCase : Tuple = 0 while 2 * i * i <= odd_composites[num]: _UpperCAmelCase : Union[str, Any] = odd_composites[num] - 2 * i * i if is_prime(__A ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(__A ) == n: return list_nums return [] def _lowerCamelCase ( ) -> Dict: return compute_nums(1 )[0] if __name__ == "__main__": print(F'{solution() = }')
485
from __future__ import annotations from collections.abc import MutableSequence class UpperCAmelCase_ : """simple docstring""" def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ): if len(_UpperCAmelCase ) != degree + 1: raise ValueError( 'The number of coefficients should be equal to the degree + 1.' ) _lowerCAmelCase :list[float] = list(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = degree def __add__( self: str , _UpperCAmelCase: Polynomial ): if self.degree > polynomial_a.degree: _lowerCAmelCase :Any = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , _UpperCAmelCase ) else: _lowerCAmelCase :List[Any] = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , _UpperCAmelCase ) def __sub__( self: str , _UpperCAmelCase: Polynomial ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self: Union[str, Any] ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self: int , _UpperCAmelCase: Polynomial ): _lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ): _lowerCAmelCase :int | float = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self: Union[str, Any] ): _lowerCAmelCase :Dict = '' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 
1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase ) return polynomial def __repr__( self: Optional[Any] ): return self.__str__() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :list[float] = [0] * self.degree for i in range(self.degree ): _lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ): _lowerCAmelCase :list[float] = [0] * (self.degree + 2) _lowerCAmelCase :str = constant for i in range(self.degree + 1 ): _lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , _UpperCAmelCase ) def __eq__( self: List[Any] , _UpperCAmelCase: object ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self: Optional[Any] , _UpperCAmelCase: object ): return not self.__eq__(_UpperCAmelCase )
687
0
"""Tests for ``FeaturesManager.determine_framework`` (ONNX export helper)."""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """Checks framework resolution from an explicit argument, a local
    checkpoint's contents, and the installed-framework environment."""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path):
        """Save a PyTorch checkpoint of the small test model into model_path."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path):
        """Save a TensorFlow checkpoint of the small test model into model_path."""
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        # NOTE(review): the obfuscated source lost the expected exception
        # type; FileNotFoundError matches the upstream test — confirm.
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # NOTE(review): the MagicMock return values were lost in the
        # obfuscated source; reconstructed from the inline comments.
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
573
"""Lazy-import module initializer for the GPT-Neo model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Maps submodule name -> public symbols; consumed lazily by _LazyModule below.
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

# PyTorch implementations are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

# Flax implementations are only exposed when flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is
    # replaced by a _LazyModule that imports on first attribute access.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports are deferred.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
0
"""Pytest suite for PokerHand (Project Euler problem 54 helper)."""
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sola import PokerHand

# Hands sorted by strength, weakest first; used to derive expected outcomes.
SORTED_HANDS = (
    "4S 3H 2C 7S 5H",
    "9D 8H 2C 6S 7H",
    "2D 6D 9D TH 7D",
    "TC 8C 2S JH 6C",
    "JH 8S TH AH QH",
    "TS KS 5S 9S AC",
    "KD 6S 9D TH AD",
    "KS 8D 4D 9S 4S",  # pair
    "8C 4S KH JS 4D",  # pair
    "QH 8H KD JH 8S",  # pair
    "KC 4H KS 2H 8D",  # pair
    "KD 4S KC 3H 8S",  # pair
    "AH 8S AS KC JH",  # pair
    "3H 4C 4H 3S 2H",  # 2 pairs
    "5S 5D 2C KH KH",  # 2 pairs
    "3C KH 5D 5S KH",  # 2 pairs
    "AS 3C KH AD KH",  # 2 pairs
    "7C 7S 3S 7H 5S",  # 3 of a kind
    "7C 7S KH 2H 7H",  # 3 of a kind
    "AC KH QH AH AS",  # 3 of a kind
    "2H 4D 3C AS 5S",  # straight (low ace)
    "3C 5C 4C 2C 6H",  # straight
    "6S 8S 7S 5H 9H",  # straight
    "JS QS 9H TS KH",  # straight
    "QC KH TS JS AH",  # straight (high ace)
    "8C 9C 5C 3C TC",  # flush
    "3S 8S 9S 5S KS",  # flush
    "4C 5C 9C 8C KC",  # flush
    "JH 8H AH KH QH",  # flush
    "3D 2H 3H 2C 2D",  # full house
    "2H 2C 3S 3H 3D",  # full house
    "KH KC 3S 3H 3D",  # full house
    "JC 6H JS JD JH",  # 4 of a kind
    "JC 7H JS JD JH",  # 4 of a kind
    "JC KH JS JD JH",  # 4 of a kind
    "2S AS 4S 5S 3S",  # straight flush (low ace)
    "2D 6D 3D 4D 5D",  # straight flush
    "5C 6C 3C 7C 4C",  # straight flush
    "JH 9H TH KH QH",  # straight flush
    "JH AH TH KH QH",  # royal flush (high ace straight flush)
)

# (player hand, opponent hand, expected compare_with result)
TEST_CASES = (
    ("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
    ("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
    ("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
    ("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
    ("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
    ("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
    ("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
    ("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
    ("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
    ("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
    ("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
    ("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
    ("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
    ("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
    ("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
    ("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
    ("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
    ("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
    ("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
    ("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
    ("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
    ("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
    ("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
    ("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
    ("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
    ("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
    ("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
    ("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
    ("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
    ("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
    ("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
    ("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)

TEST_FLUSH = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", True),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", False),
    ("AS 3S 4S 8S 2S", True),
)

TEST_STRAIGHT = (
    ("2H 3H 4H 5H 6H", True),
    ("AS AH 2H AD AC", False),
    ("2H 3H 5H 6H 7H", False),
    ("KS AS TS QS JS", True),
    ("8H 9H QS JS TH", True),
)

TEST_FIVE_HIGH_STRAIGHT = (
    ("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
    ("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
    ("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
    ("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)

TEST_KIND = (
    ("JH AH TH KH QH", 0),
    ("JH 9H TH KH QH", 0),
    ("JC KH JS JD JH", 7),
    ("KH KC 3S 3H 3D", 6),
    ("8C 9C 5C 3C TC", 0),
    ("JS QS 9H TS KH", 0),
    ("7C 7S KH 2H 7H", 3),
    ("3C KH 5D 5S KH", 2),
    ("QH 8H KD JH 8S", 1),
    ("2D 6D 9D TH 7D", 0),
)

TEST_TYPES = (
    ("JH AH TH KH QH", 23),
    ("JH 9H TH KH QH", 22),
    ("JC KH JS JD JH", 21),
    ("KH KC 3S 3H 3D", 20),
    ("8C 9C 5C 3C TC", 19),
    ("JS QS 9H TS KH", 18),
    ("7C 7S KH 2H 7H", 17),
    ("3C KH 5D 5S KH", 16),
    ("QH 8H KD JH 8S", 15),
    ("2D 6D 9D TH 7D", 14),
)


def generate_random_hand():
    """Pick two hands from SORTED_HANDS and derive the expected outcome
    from their positions in the sorted tuple."""
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    # (play >= oppo) + (play > oppo) indexes 0 (Loss), 1 (Tie) or 2 (Win).
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    """Yield number_of_hands randomized (hand, other, expected) triples."""
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_CASES)
def test_hand_compare_with(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_hand_compare_with_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    """Shuffling then sorting PokerHands must restore SORTED_HANDS order."""
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated evaluation must not mutate the hand's cached card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    """Problem 54: count player-1 wins over the official hand file."""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
221
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ): """simple docstring""" _lowerCAmelCase :Optional[Any] = a while True: _lowerCAmelCase :str = Decimal(__magic_name__ ) - ( Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__magic_name__ ) ) < precision: # noqa: S307 return float(__magic_name__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''') # Find Square Root of 5 print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''') # Exponential Roots print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
687
0
"""Image/text processor class for Chinese-CLIP."""
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ChineseCLIPProcessor(ProcessorMixin):
    """Wraps a Chinese-CLIP image processor and a BERT tokenizer into a
    single processor exposing both text and image preparation."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        # `feature_extractor` is the deprecated spelling of `image_processor`.
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare text and/or images for the model.

        Returns a BatchEncoding holding tokenizer outputs, image-processor
        outputs, or both (with `pixel_values` merged into the text encoding).

        Raises:
            ValueError: if neither text nor images is provided.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Deduplicate while preserving order across both sub-processors.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class


# Backward-compatible alias for the previous (obfuscated) class name.
UpperCAmelCase_ = ChineseCLIPProcessor
94
"""Convert OpenAI consistency-model checkpoints to diffusers format."""
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)

# UNet config used by the tiny test checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet config for the class-conditional ImageNet-64 checkpoints.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# UNet config for the unconditional LSUN-256 (bedroom / cat) checkpoints.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler configs: consistency distillation (cd) and training (ct) variants.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    """Parse a command-line string into a bool (argparse helper).

    Raises:
        argparse.ArgumentTypeError: if v is not a recognized boolean string.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Map one original ResNet block's weights onto diffusers key names."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        # Channel-changing blocks carry an extra 1x1 shortcut convolution.
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Map one original attention block's fused qkv weights onto diffusers keys."""
    # The original stores q, k, v fused along dim 0; split into thirds.
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    # Original weights are conv-shaped (.., 1, 1); diffusers uses linear layers.
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Load an original consistency-model state dict and rename every key
    into the diffusers UNet2DModel layout described by unet_config."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # When channel count changes, the first resnet of the block carries
        # a skip (shortcut) convolution.
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            # Every down block except the last ends with a downsampler resnet.
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
687
0
from __future__ import annotations import pandas as pd def _UpperCAmelCase (UpperCamelCase_ : list[int] , UpperCamelCase_ : list[int] , UpperCamelCase_ : int ): '''simple docstring''' _lowerCAmelCase : Dict = [0] * no_of_processes _lowerCAmelCase : Optional[Any] = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(UpperCamelCase_ ): _lowerCAmelCase : Dict = burst_time[i] _lowerCAmelCase : List[Any] = 0 _lowerCAmelCase : Tuple = 0 _lowerCAmelCase : Optional[int] = 999999999 _lowerCAmelCase : str = 0 _lowerCAmelCase : List[str] = False # Process until all processes are completed while complete != no_of_processes: for j in range(UpperCamelCase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: _lowerCAmelCase : List[Any] = remaining_time[j] _lowerCAmelCase : str = j _lowerCAmelCase : Dict = True if not check: increment_time += 1 continue remaining_time[short] -= 1 _lowerCAmelCase : Optional[int] = remaining_time[short] if minm == 0: _lowerCAmelCase : Optional[Any] = 999999999 if remaining_time[short] == 0: complete += 1 _lowerCAmelCase : List[Any] = False # Find finish time of current process _lowerCAmelCase : Dict = increment_time + 1 # Calculate waiting time _lowerCAmelCase : Optional[int] = finish_time - arrival_time[short] _lowerCAmelCase : int = finar - burst_time[short] if waiting_time[short] < 0: _lowerCAmelCase : Tuple = 0 # Increment time increment_time += 1 return waiting_time def _UpperCAmelCase (UpperCamelCase_ : list[int] , UpperCamelCase_ : int , UpperCamelCase_ : list[int] ): '''simple docstring''' _lowerCAmelCase : List[str] = [0] * no_of_processes for i in range(UpperCamelCase_ ): _lowerCAmelCase : Tuple = burst_time[i] + waiting_time[i] return turn_around_time def _UpperCAmelCase (UpperCamelCase_ : list[int] , UpperCamelCase_ : list[int] , UpperCamelCase_ : int ): '''simple docstring''' _lowerCAmelCase : str = 0 _lowerCAmelCase : Tuple = 0 for i in range(UpperCamelCase_ ): 
_lowerCAmelCase : Union[str, Any] = total_waiting_time + waiting_time[i] _lowerCAmelCase : Union[str, Any] = total_turn_around_time + turn_around_time[i] print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}" ) print("""Average turn around time =""" , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print("Enter how many process you want to analyze") _lowerCamelCase : List[Any] = int(input()) _lowerCamelCase : List[Any] = [0] * no_of_processes _lowerCamelCase : str = [0] * no_of_processes _lowerCamelCase : List[str] = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print("Enter the arrival time and burst time for process:--" + str(i + 1)) _lowerCamelCase , _lowerCamelCase : Any = map(int, input().split()) _lowerCamelCase : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes) _lowerCamelCase : List[Any] = burst_time _lowerCamelCase : List[Any] = no_of_processes _lowerCamelCase : Any = waiting_time _lowerCamelCase : Any = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) _lowerCamelCase : Dict = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ "Process", "BurstTime", "ArrivalTime", "WaitingTime", "TurnAroundTime", ], ) # Printing the dataFrame pd.set_option("display.max_rows", fcfs.shape[0] + 1) print(fcfs)
429
import os
import re
import shutil
import sys
import tempfile
import unittest

import black

# Repository root; the `check_copies` utility lives under utils/.
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402

# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class CopyCheckTester(unittest.TestCase):
    """Tests for utils/check_copies.py.

    NOTE(review): reconstructed from an identifier-corrupted copy in which all
    method names had been collapsed to the same placeholder and broke each
    other; names restored from the visible call structure.
    """

    def setUp(self):
        # Work on a temp copy of the scheduler file so checks can overwrite it.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Restore the module-level path before removing the temp dir.
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `class_code` under `comment` and check `is_copy_consistent`.

        When `overwrite_result` is given, the checker is expected to rewrite the
        file so that it contains `overwrite_result` instead.
        """
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): the corrupted source showed `black.TargetVersion.PYaa`;
        # PY35 is assumed here — confirm against the repository's utils tests.
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                # Fixed: the original used assertTrue(f.read(), expected), which
                # never compares the two values.
                self.assertEqual(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
687
0
"""simple docstring""" from __future__ import annotations from collections import namedtuple def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Optional[Any]: """simple docstring""" __lowercase : Any = namedtuple("result", "name value" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("Only one argument must be 0" ) elif power < 0: raise ValueError( "Power cannot be negative in any electrical/electronics system" ) elif voltage == 0: return result("voltage", power / current ) elif current == 0: return result("current", power / voltage ) elif power == 0: return result("power", float(round(abs(voltage * current ), 2 ) ) ) else: raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
575
from dataclasses import dataclass, field
from typing import Optional

# NOTE(review): reconstructed from an identifier-corrupted copy in which every
# field of every dataclass had been renamed to the same placeholder (so each
# class silently kept only one field) and some defaults were replaced by an
# unresolved placeholder. Field names below are inferred from the `help`
# metadata (codeparrot research-project arguments) — confirm against the
# upstream `scripts/arguments.py` before relying on exact names/defaults.


@dataclass
class TrainingArguments:
    """Configuration for training a codeparrot model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating a model on the validation set."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing/filtering the raw dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new tokenizer."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    n_examples: Optional[int] = field(
        default=32_768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for pretokenizing the training dataset."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
687
0
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCAmelCase_ = NewType('''DataClass''', Any) lowerCAmelCase_ = NewType('''DataClassType''', Any) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Tuple = {str(__SCREAMING_SNAKE_CASE ): choice for choice in choices} return lambda __SCREAMING_SNAKE_CASE : str_to_choice.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(*, __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , )-> str: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _SCREAMING_SNAKE_CASE : Dict = {} if aliases is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = aliases if help is not None: _SCREAMING_SNAKE_CASE : List[str] = help return dataclasses.field(metadata=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , default_factory=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class _snake_case ( snake_case__ ): """simple docstring""" a = 42 def __init__( self : Dict , _A : Union[DataClassType, Iterable[DataClassType]] , 
**_A : List[str]): """simple docstring""" if "formatter_class" not in kwargs: _SCREAMING_SNAKE_CASE : List[Any] = ArgumentDefaultsHelpFormatter super().__init__(**_UpperCAmelCase) if dataclasses.is_dataclass(_UpperCAmelCase): _SCREAMING_SNAKE_CASE : Optional[Any] = [dataclass_types] _SCREAMING_SNAKE_CASE : Optional[Any] = list(_UpperCAmelCase) for dtype in self.dataclass_types: self._add_dataclass_arguments(_UpperCAmelCase) @staticmethod def _lowerCAmelCase ( _A : ArgumentParser , _A : dataclasses.Field): """simple docstring""" _SCREAMING_SNAKE_CASE : int = f"""--{field.name}""" _SCREAMING_SNAKE_CASE : Dict = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , _UpperCAmelCase): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""") _SCREAMING_SNAKE_CASE : str = kwargs.pop("""aliases""" , []) if isinstance(_UpperCAmelCase , _UpperCAmelCase): _SCREAMING_SNAKE_CASE : Tuple = [aliases] _SCREAMING_SNAKE_CASE : Dict = getattr(field.type , """__origin__""" , field.type) if origin_type is Union or (hasattr(_UpperCAmelCase , """UnionType""") and isinstance(_UpperCAmelCase , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(_UpperCAmelCase) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" f""" Problem encountered in field '{field.name}'.""") if type(_UpperCAmelCase) not in field.type.__args__: # filter `str` in Union _SCREAMING_SNAKE_CASE : Dict = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(field.type , """__origin__""" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union 
(except for `Union[bool, NoneType]`) _SCREAMING_SNAKE_CASE : List[str] = ( field.type.__args__[0] if isinstance(_UpperCAmelCase , field.type.__args__[1]) else field.type.__args__[1] ) _SCREAMING_SNAKE_CASE : Any = getattr(field.type , """__origin__""" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _SCREAMING_SNAKE_CASE : Tuple = {} if origin_type is Literal or (isinstance(field.type , _UpperCAmelCase) and issubclass(field.type , _UpperCAmelCase)): if origin_type is Literal: _SCREAMING_SNAKE_CASE : Tuple = field.type.__args__ else: _SCREAMING_SNAKE_CASE : str = [x.value for x in field.type] _SCREAMING_SNAKE_CASE : Dict = make_choice_type_function(kwargs["""choices"""]) if field.default is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : List[Any] = field.default else: _SCREAMING_SNAKE_CASE : Tuple = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _SCREAMING_SNAKE_CASE : Optional[Any] = copy(_UpperCAmelCase) # Hack because type=bool in argparse does not behave as we want. _SCREAMING_SNAKE_CASE : Optional[Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. _SCREAMING_SNAKE_CASE : List[str] = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _SCREAMING_SNAKE_CASE : Optional[int] = default # This tells argparse we accept 0 or 1 value after --field_name _SCREAMING_SNAKE_CASE : Any = '?' 
# This is the value that will get picked if we do --field_name (without value) _SCREAMING_SNAKE_CASE : Any = True elif isclass(_UpperCAmelCase) and issubclass(_UpperCAmelCase , _UpperCAmelCase): _SCREAMING_SNAKE_CASE : Tuple = field.type.__args__[0] _SCREAMING_SNAKE_CASE : int = '+' if field.default_factory is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : int = field.default_factory() elif field.default is dataclasses.MISSING: _SCREAMING_SNAKE_CASE : int = True else: _SCREAMING_SNAKE_CASE : str = field.type if field.default is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Any = field.default elif field.default_factory is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Dict = field.default_factory() else: _SCREAMING_SNAKE_CASE : Dict = True parser.add_argument(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): _SCREAMING_SNAKE_CASE : Union[str, Any] = False parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **_UpperCAmelCase) def _lowerCAmelCase ( self : List[Any] , _A : DataClassType): """simple docstring""" if hasattr(_UpperCAmelCase , """_argument_group_name"""): _SCREAMING_SNAKE_CASE : Any = self.add_argument_group(dtype._argument_group_name) else: _SCREAMING_SNAKE_CASE : Tuple = self try: _SCREAMING_SNAKE_CASE : Dict[str, type] = get_type_hints(_UpperCAmelCase) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. 
Try declaring the class in global scope or """ """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(_UpperCAmelCase): _SCREAMING_SNAKE_CASE : List[Any] = '.'.join(map(_UpperCAmelCase , sys.version_info[:3])) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""") from ex raise for field in dataclasses.fields(_UpperCAmelCase): if not field.init: continue _SCREAMING_SNAKE_CASE : List[Any] = type_hints[field.name] self._parse_dataclass_field(_UpperCAmelCase , _UpperCAmelCase) def _lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int]=None , _A : Optional[int]=False , _A : Tuple=True , _A : Dict=None , _A : int=None , ): """simple docstring""" if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): _SCREAMING_SNAKE_CASE : Any = [] if args_filename: args_files.append(Path(_UpperCAmelCase)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(""".args""")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _SCREAMING_SNAKE_CASE : Tuple = ArgumentParser() args_file_parser.add_argument(_UpperCAmelCase , type=_UpperCAmelCase , action="""append""") # Use only remaining args for further parsing (remove the args_file_flag) _SCREAMING_SNAKE_CASE : List[Any] = 
args_file_parser.parse_known_args(args=_UpperCAmelCase) _SCREAMING_SNAKE_CASE : Any = vars(_UpperCAmelCase).get(args_file_flag.lstrip("""-""") , _UpperCAmelCase) if cmd_args_file_paths: args_files.extend([Path(_UpperCAmelCase) for p in cmd_args_file_paths]) _SCREAMING_SNAKE_CASE : List[Any] = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _SCREAMING_SNAKE_CASE : int = file_args + args if args is not None else file_args + sys.argv[1:] _SCREAMING_SNAKE_CASE : Dict = self.parse_known_args(args=_UpperCAmelCase) _SCREAMING_SNAKE_CASE : List[Any] = [] for dtype in self.dataclass_types: _SCREAMING_SNAKE_CASE : List[str] = {f.name for f in dataclasses.fields(_UpperCAmelCase) if f.init} _SCREAMING_SNAKE_CASE : Tuple = {k: v for k, v in vars(_UpperCAmelCase).items() if k in keys} for k in keys: delattr(_UpperCAmelCase , _UpperCAmelCase) _SCREAMING_SNAKE_CASE : str = dtype(**_UpperCAmelCase) outputs.append(_UpperCAmelCase) if len(namespace.__dict__) > 0: # additional namespace. 
outputs.append(_UpperCAmelCase) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""") return (*outputs,) def _lowerCAmelCase ( self : Dict , _A : Dict[str, Any] , _A : bool = False): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = set(args.keys()) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for dtype in self.dataclass_types: _SCREAMING_SNAKE_CASE : str = {f.name for f in dataclasses.fields(_UpperCAmelCase) if f.init} _SCREAMING_SNAKE_CASE : Any = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) _SCREAMING_SNAKE_CASE : List[Any] = dtype(**_UpperCAmelCase) outputs.append(_UpperCAmelCase) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_UpperCAmelCase)}""") return tuple(_UpperCAmelCase) def _lowerCAmelCase ( self : List[str] , _A : str , _A : bool = False): """simple docstring""" with open(Path(_UpperCAmelCase) , encoding="""utf-8""") as open_json_file: _SCREAMING_SNAKE_CASE : Any = json.loads(open_json_file.read()) _SCREAMING_SNAKE_CASE : List[str] = self.parse_dict(_UpperCAmelCase , allow_extra_keys=_UpperCAmelCase) return tuple(_UpperCAmelCase) def _lowerCAmelCase ( self : List[str] , _A : str , _A : bool = False): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.parse_dict(yaml.safe_load(Path(_UpperCAmelCase).read_text()) , allow_extra_keys=_UpperCAmelCase) return tuple(_UpperCAmelCase)
338
import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :List[str] = 'ylacombe/bark-small' _lowerCAmelCase :int = tempfile.mkdtemp() _lowerCAmelCase :List[str] = 'en_speaker_1' _lowerCAmelCase :Union[str, Any] = 'This is a test string' _lowerCAmelCase :List[Any] = 'speaker_embeddings_path.json' _lowerCAmelCase :str = 'speaker_embeddings' def SCREAMING_SNAKE_CASE__ ( self: str , **_UpperCAmelCase: Optional[Any] ): return AutoTokenizer.from_pretrained(self.checkpoint , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase :List[Any] = self.get_tokenizer() _lowerCAmelCase :List[str] = BarkProcessor(tokenizer=_UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) _lowerCAmelCase :List[str] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :List[str] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _lowerCAmelCase :Tuple = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' ) _lowerCAmelCase :Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): 
_lowerCAmelCase :Tuple = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _lowerCAmelCase :List[Any] = 35 _lowerCAmelCase :Optional[int] = 2 _lowerCAmelCase :Dict = 8 _lowerCAmelCase :Dict = { 'semantic_prompt': np.ones(_UpperCAmelCase ), 'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len) ), 'fine_prompt': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase ) _lowerCAmelCase :List[Any] = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from npz file _lowerCAmelCase :int = os.path.join(self.tmpdirname , 'file.npz' ) np.savez(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Dict = processor(text=self.input_string , voice_preset=_UpperCAmelCase ) _lowerCAmelCase :Optional[int] = inputs['history_prompt'] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_UpperCAmelCase , np.array([] ) ).tolist() ) # test loading voice preset from the hub _lowerCAmelCase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Tuple = self.get_tokenizer() _lowerCAmelCase :Union[str, Any] = BarkProcessor(tokenizer=_UpperCAmelCase ) _lowerCAmelCase :List[Any] = processor(text=self.input_string ) _lowerCAmelCase :List[str] = tokenizer( self.input_string , padding='max_length' , max_length=256 , add_special_tokens=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
687
0
def __UpperCamelCase ( lowercase__ : str , lowercase__ : int ) -> List[Any]: '''simple docstring''' lowerCAmelCase_ : Any = word.split() def justify(lowercase__ : list , lowercase__ : int , lowercase__ : int ) -> str: lowerCAmelCase_ : Optional[Any] = max_width - width lowerCAmelCase_ : Optional[int] = len(lowercase__ ) if len(lowercase__ ) == 1: # if there is only word in line # just insert overall_spaces_count for the remainder of line return line[0] + " " * overall_spaces_count else: lowerCAmelCase_ : str = words_count - 1 # num_spaces_between_words_list[i] : tells you to insert # num_spaces_between_words_list[i] spaces # after word on line[i] lowerCAmelCase_ : List[str] = spaces_to_insert_between_words * [ overall_spaces_count // spaces_to_insert_between_words ] lowerCAmelCase_ : int = ( overall_spaces_count % spaces_to_insert_between_words ) # distribute spaces via round robin to the left words for i in range(lowercase__ ): num_spaces_between_words_list[i] += 1 lowerCAmelCase_ : List[str] = [] for i in range(lowercase__ ): # add the word aligned_words_list.append(line[i] ) # add the spaces to insert aligned_words_list.append(num_spaces_between_words_list[i] * """ """ ) # just add the last word to the sentence aligned_words_list.append(line[-1] ) # join the aligned words list to form a justified line return "".join(lowercase__ ) lowerCAmelCase_ : List[str] = [] lowerCAmelCase_ : list[str] = [] lowerCAmelCase_ : int = 0 for word in words: if width + len(lowercase__ ) + len(lowercase__ ) <= max_width: # keep adding words until we can fill out max_width # width = sum of length of all words (without overall_spaces_count) # len(word) = length of current word # len(line) = number of overall_spaces_count to insert between words line.append(lowercase__ ) width += len(lowercase__ ) else: # justify the line and add it to result answer.append(justify(lowercase__ , lowercase__ , lowercase__ ) ) # reset new line and new width lowerCAmelCase_ : Dict = [word], len(lowercase__ 
) lowerCAmelCase_ : Dict = max_width - width - len(lowercase__ ) answer.append(""" """.join(lowercase__ ) + (remaining_spaces + 1) * """ """ ) return answer if __name__ == "__main__": from doctest import testmod testmod()
600
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""", """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""", """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""", """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""", """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""", """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""", """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""", """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""", """bert-large-uncased-whole-word-masking""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking""": ( """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json""" ), """bert-large-uncased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-large-cased-whole-word-masking-finetuned-squad""": ( """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json""" ), """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""", """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""", """bert-base-german-dbmdz-uncased""": 
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""", """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""", """cl-tohoku/bert-base-japanese-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json""" ), """cl-tohoku/bert-base-japanese-char-whole-word-masking""": ( """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-cased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json""" ), """TurkuNLP/bert-base-finnish-uncased-v1""": ( """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json""" ), """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""", # See all BERT models at https://huggingface.co/models?filter=bert } class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : int = 'bert' def __init__( self: Optional[Any] , _UpperCAmelCase: Tuple=3_0522 , _UpperCAmelCase: int=768 , _UpperCAmelCase: Union[str, Any]=12 , _UpperCAmelCase: Dict=12 , _UpperCAmelCase: List[Any]=3072 , _UpperCAmelCase: List[Any]="gelu" , _UpperCAmelCase: Union[str, Any]=0.1 , _UpperCAmelCase: Dict=0.1 , _UpperCAmelCase: List[Any]=512 , _UpperCAmelCase: Optional[Any]=2 , _UpperCAmelCase: Optional[int]=0.0_2 , _UpperCAmelCase: Any=1e-1_2 , _UpperCAmelCase: Optional[Any]=0 , _UpperCAmelCase: Union[str, Any]="absolute" , _UpperCAmelCase: Dict=True , _UpperCAmelCase: Optional[Any]=None , **_UpperCAmelCase: Optional[int] , ): super().__init__(pad_token_id=_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :List[Any] = vocab_size _lowerCAmelCase :Tuple = 
hidden_size _lowerCAmelCase :Dict = num_hidden_layers _lowerCAmelCase :Optional[Any] = num_attention_heads _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :int = intermediate_size _lowerCAmelCase :Tuple = hidden_dropout_prob _lowerCAmelCase :Tuple = attention_probs_dropout_prob _lowerCAmelCase :List[Any] = max_position_embeddings _lowerCAmelCase :Dict = type_vocab_size _lowerCAmelCase :Any = initializer_range _lowerCAmelCase :int = layer_norm_eps _lowerCAmelCase :List[Any] = position_embedding_type _lowerCAmelCase :int = use_cache _lowerCAmelCase :Union[str, Any] = classifier_dropout class UpperCAmelCase_ (snake_case__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): if self.task == "multiple-choice": _lowerCAmelCase :List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _lowerCAmelCase :Any = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ] )
687
0
'''simple docstring''' import secrets from random import shuffle from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation def lowerCamelCase__ ( __lowerCamelCase : int = 8 ): '''simple docstring''' _UpperCAmelCase : List[str] =ascii_letters + digits + punctuation return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : int ): '''simple docstring''' i -= len(__lowerCamelCase ) _UpperCAmelCase : int =i // 3 _UpperCAmelCase : Any =i % 3 # chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) + # random_number(digits, i / 3) + random_characters(punctuation, i / 3) _UpperCAmelCase : Dict =( chars_incl + random(__lowerCamelCase , quotient + remainder ) + random(__lowerCamelCase , __lowerCamelCase ) + random(__lowerCamelCase , __lowerCamelCase ) ) _UpperCAmelCase : Dict =list(__lowerCamelCase ) shuffle(__lowerCamelCase ) return "".join(__lowerCamelCase ) # random is a generalised function for letters, characters and numbers def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : int ): '''simple docstring''' return "".join(secrets.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) ) def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Any ): '''simple docstring''' pass # Put your code here... def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : Any ): '''simple docstring''' pass # Put your code here... def lowerCamelCase__ ( __lowerCamelCase : Any , __lowerCamelCase : Optional[Any] ): '''simple docstring''' pass # Put your code here... 
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : int = 8 ): '''simple docstring''' if len(__lowerCamelCase ) < min_length: # Your Password must be at least 8 characters long return False _UpperCAmelCase : int =any(char in ascii_uppercase for char in password ) _UpperCAmelCase : Tuple =any(char in ascii_lowercase for char in password ) _UpperCAmelCase : Union[str, Any] =any(char in digits for char in password ) _UpperCAmelCase : Dict =any(char in punctuation for char in password ) return upper and lower and num and spec_char # Passwords should contain UPPERCASE, lowerase # numbers, and special characters def lowerCamelCase__ ( ): '''simple docstring''' _UpperCAmelCase : Union[str, Any] =int(input('Please indicate the max length of your password: ' ).strip() ) _UpperCAmelCase : Optional[int] =input( 'Please indicate the characters that must be in your password: ' ).strip() print('Password generated:' , password_generator(__lowerCamelCase ) ) print( 'Alternative Password generated:' , alternative_password_generator(__lowerCamelCase , __lowerCamelCase ) , ) print('[If you are thinking of using this passsword, You better save it.]' ) if __name__ == "__main__": main()
446
import inspect from typing import Optional, Union import numpy as np import PIL import torch from torch.nn import functional as F from torchvision import transforms from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DiffusionPipeline, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput from diffusers.utils import ( PIL_INTERPOLATION, randn_tensor, ) def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : Tuple ): """simple docstring""" if isinstance(__magic_name__ , torch.Tensor ): return image elif isinstance(__magic_name__ , PIL.Image.Image ): _lowerCAmelCase :Tuple = [image] if isinstance(image[0] , PIL.Image.Image ): _lowerCAmelCase :List[Any] = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image] _lowerCAmelCase :Optional[Any] = np.concatenate(__magic_name__ , axis=0 ) _lowerCAmelCase :Any = np.array(__magic_name__ ).astype(np.floataa ) / 255.0 _lowerCAmelCase :Optional[int] = image.transpose(0 , 3 , 1 , 2 ) _lowerCAmelCase :int = 2.0 * image - 1.0 _lowerCAmelCase :Optional[int] = torch.from_numpy(__magic_name__ ) elif isinstance(image[0] , torch.Tensor ): _lowerCAmelCase :str = torch.cat(__magic_name__ , dim=0 ) return image def UpperCamelCase_( __magic_name__ : Tuple , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[int] , __magic_name__ : int=0.9995 ): """simple docstring""" if not isinstance(__magic_name__ , np.ndarray ): _lowerCAmelCase :Tuple = True _lowerCAmelCase :str = va.device _lowerCAmelCase :List[str] = va.cpu().numpy() _lowerCAmelCase :List[str] = va.cpu().numpy() _lowerCAmelCase :Any = np.sum(va * va / (np.linalg.norm(__magic_name__ ) * np.linalg.norm(__magic_name__ )) ) if np.abs(__magic_name__ ) > DOT_THRESHOLD: 
_lowerCAmelCase :Optional[Any] = (1 - t) * va + t * va else: _lowerCAmelCase :int = np.arccos(__magic_name__ ) _lowerCAmelCase :Union[str, Any] = np.sin(__magic_name__ ) _lowerCAmelCase :Union[str, Any] = theta_a * t _lowerCAmelCase :str = np.sin(__magic_name__ ) _lowerCAmelCase :Any = np.sin(theta_a - theta_t ) / sin_theta_a _lowerCAmelCase :Optional[Any] = sin_theta_t / sin_theta_a _lowerCAmelCase :List[Any] = sa * va + sa * va if inputs_are_torch: _lowerCAmelCase :int = torch.from_numpy(__magic_name__ ).to(__magic_name__ ) return va def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ): """simple docstring""" _lowerCAmelCase :Any = F.normalize(__magic_name__ , dim=-1 ) _lowerCAmelCase :str = F.normalize(__magic_name__ , dim=-1 ) return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 ) def UpperCamelCase_( __magic_name__ : Optional[Any] , __magic_name__ : Union[str, Any] ): """simple docstring""" for param in model.parameters(): _lowerCAmelCase :List[str] = value class UpperCAmelCase_ (snake_case__ ): """simple docstring""" def __init__( self: Any , _UpperCAmelCase: AutoencoderKL , _UpperCAmelCase: CLIPTextModel , _UpperCAmelCase: CLIPModel , _UpperCAmelCase: CLIPTokenizer , _UpperCAmelCase: UNetaDConditionModel , _UpperCAmelCase: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , _UpperCAmelCase: CLIPFeatureExtractor , _UpperCAmelCase: str=None , _UpperCAmelCase: Tuple=None , _UpperCAmelCase: Union[str, Any]=None , ): super().__init__() self.register_modules( vae=_UpperCAmelCase , text_encoder=_UpperCAmelCase , clip_model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , coca_model=_UpperCAmelCase , coca_tokenizer=_UpperCAmelCase , coca_transform=_UpperCAmelCase , ) _lowerCAmelCase :int = ( feature_extractor.size if isinstance(feature_extractor.size , _UpperCAmelCase ) else 
feature_extractor.size['shortest_edge'] ) _lowerCAmelCase :Union[str, Any] = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std ) set_requires_grad(self.text_encoder , _UpperCAmelCase ) set_requires_grad(self.clip_model , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Union[str, int]] = "auto" ): if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory _lowerCAmelCase :Any = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): self.enable_attention_slicing(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any ): set_requires_grad(self.vae , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): set_requires_grad(self.vae , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any ): set_requires_grad(self.unet , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): set_requires_grad(self.unet , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Dict ): # get the original timestep using init_timestep _lowerCAmelCase :Optional[Any] = min(int(num_inference_steps * strength ) , _UpperCAmelCase ) _lowerCAmelCase :List[str] = max(num_inference_steps - init_timestep , 0 ) _lowerCAmelCase :Tuple = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Union[str, Any]=None ): if not isinstance(_UpperCAmelCase , torch.Tensor ): raise ValueError(f"""`image` has to be of type `torch.Tensor` but is {type(_UpperCAmelCase )}""" ) _lowerCAmelCase :Union[str, Any] = image.to(device=_UpperCAmelCase , 
dtype=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ): _lowerCAmelCase :List[Any] = [ self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCAmelCase ) ] _lowerCAmelCase :List[str] = torch.cat(_UpperCAmelCase , dim=0 ) else: _lowerCAmelCase :List[str] = self.vae.encode(_UpperCAmelCase ).latent_dist.sample(_UpperCAmelCase ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :List[Any] = 0.1_8_2_1_5 * init_latents _lowerCAmelCase :List[Any] = init_latents.repeat_interleave(_UpperCAmelCase , dim=0 ) _lowerCAmelCase :Dict = randn_tensor(init_latents.shape , generator=_UpperCAmelCase , device=_UpperCAmelCase , dtype=_UpperCAmelCase ) # get latents _lowerCAmelCase :Dict = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :List[str] = init_latents return latents def SCREAMING_SNAKE_CASE__ ( self: Dict , _UpperCAmelCase: Union[str, Any] ): _lowerCAmelCase :Optional[int] = self.coca_transform(_UpperCAmelCase ).unsqueeze(0 ) with torch.no_grad(), torch.cuda.amp.autocast(): _lowerCAmelCase :Optional[Any] = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) ) _lowerCAmelCase :int = self.coca_tokenizer.decode(generated[0].cpu().numpy() ) return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' ) def SCREAMING_SNAKE_CASE__ ( self: int , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: List[str] ): _lowerCAmelCase :Optional[int] = self.feature_extractor.preprocess(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half() _lowerCAmelCase :List[str] = self.clip_model.get_image_features(_UpperCAmelCase ) _lowerCAmelCase :List[Any] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase ) _lowerCAmelCase :Dict = 
image_embeddings_clip.repeat_interleave(_UpperCAmelCase , dim=0 ) return image_embeddings_clip @torch.enable_grad() def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] , _UpperCAmelCase: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , _UpperCAmelCase: Dict , _UpperCAmelCase: str , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple , ): _lowerCAmelCase :Dict = latents.detach().requires_grad_() _lowerCAmelCase :Optional[Any] = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ): _lowerCAmelCase :int = self.scheduler.alphas_cumprod[timestep] _lowerCAmelCase :Optional[int] = 1 - alpha_prod_t # compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf _lowerCAmelCase :str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5 _lowerCAmelCase :Optional[Any] = torch.sqrt(_UpperCAmelCase ) _lowerCAmelCase :List[str] = pred_original_sample * (fac) + latents * (1 - fac) elif isinstance(self.scheduler , _UpperCAmelCase ): _lowerCAmelCase :Dict = self.scheduler.sigmas[index] _lowerCAmelCase :Optional[Any] = latents - sigma * noise_pred else: raise ValueError(f"""scheduler type {type(self.scheduler )} not supported""" ) # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :Tuple = 1 / 0.1_8_2_1_5 * sample _lowerCAmelCase :Optional[Any] = self.vae.decode(_UpperCAmelCase ).sample _lowerCAmelCase :List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) _lowerCAmelCase :Tuple = transforms.Resize(self.feature_extractor_size )(_UpperCAmelCase ) _lowerCAmelCase :Tuple = self.normalize(_UpperCAmelCase ).to(latents.dtype ) _lowerCAmelCase :List[Any] = 
self.clip_model.get_image_features(_UpperCAmelCase ) _lowerCAmelCase :List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCAmelCase ) _lowerCAmelCase :Tuple = spherical_dist_loss(_UpperCAmelCase , _UpperCAmelCase ).mean() * clip_guidance_scale _lowerCAmelCase :str = -torch.autograd.grad(_UpperCAmelCase , _UpperCAmelCase )[0] if isinstance(self.scheduler , _UpperCAmelCase ): _lowerCAmelCase :Union[str, Any] = latents.detach() + grads * (sigma**2) _lowerCAmelCase :Dict = noise_pred_original else: _lowerCAmelCase :Optional[int] = noise_pred_original - torch.sqrt(_UpperCAmelCase ) * grads return noise_pred, latents @torch.no_grad() def __call__( self: Optional[int] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Union[torch.FloatTensor, PIL.Image.Image] , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[str] = None , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: Optional[int] = 512 , _UpperCAmelCase: float = 0.6 , _UpperCAmelCase: Optional[int] = 50 , _UpperCAmelCase: Optional[float] = 7.5 , _UpperCAmelCase: Optional[int] = 1 , _UpperCAmelCase: float = 0.0 , _UpperCAmelCase: Optional[float] = 100 , _UpperCAmelCase: Optional[torch.Generator] = None , _UpperCAmelCase: Optional[str] = "pil" , _UpperCAmelCase: bool = True , _UpperCAmelCase: float = 0.8 , _UpperCAmelCase: float = 0.1 , _UpperCAmelCase: float = 0.1 , ): if isinstance(_UpperCAmelCase , _UpperCAmelCase ) and len(_UpperCAmelCase ) != batch_size: raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(_UpperCAmelCase )} generators.""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if isinstance(_UpperCAmelCase , torch.Generator ) and batch_size > 1: _lowerCAmelCase :int = [generator] + [None] * (batch_size - 1) _lowerCAmelCase :List[Any] = [ ('model', self.coca_model is None), ('tokenizer', 
self.coca_tokenizer is None), ('transform', self.coca_transform is None), ] _lowerCAmelCase :Optional[int] = [x[0] for x in coca_is_none if x[1]] _lowerCAmelCase :List[str] = ', '.join(_UpperCAmelCase ) # generate prompts with coca model if prompt is None if content_prompt is None: if len(_UpperCAmelCase ): raise ValueError( f"""Content prompt is None and CoCa [{coca_is_none_str}] is None.""" f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _lowerCAmelCase :List[Any] = self.get_image_description(_UpperCAmelCase ) if style_prompt is None: if len(_UpperCAmelCase ): raise ValueError( f"""Style prompt is None and CoCa [{coca_is_none_str}] is None.""" f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" ) _lowerCAmelCase :Any = self.get_image_description(_UpperCAmelCase ) # get prompt text embeddings for content and style _lowerCAmelCase :Any = self.tokenizer( _UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , ) _lowerCAmelCase :str = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0] _lowerCAmelCase :int = self.tokenizer( _UpperCAmelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCAmelCase , return_tensors='pt' , ) _lowerCAmelCase :Union[str, Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0] _lowerCAmelCase :List[str] = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # duplicate text embeddings for each generation per prompt _lowerCAmelCase :str = text_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 ) # set timesteps _lowerCAmelCase :Any = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() ) _lowerCAmelCase :Dict = {} if accepts_offset: _lowerCAmelCase :Optional[int] = 1 self.scheduler.set_timesteps(_UpperCAmelCase , **_UpperCAmelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more 
optimized to move all timesteps to correct device beforehand self.scheduler.timesteps.to(self.device ) _lowerCAmelCase , _lowerCAmelCase :List[str] = self.get_timesteps(_UpperCAmelCase , _UpperCAmelCase , self.device ) _lowerCAmelCase :int = timesteps[:1].repeat(_UpperCAmelCase ) # Preprocess image _lowerCAmelCase :Dict = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :int = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase ) _lowerCAmelCase :Any = preprocess(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.prepare_latents( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , text_embeddings.dtype , self.device , _UpperCAmelCase ) _lowerCAmelCase :str = slerp(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) if clip_guidance_scale > 0: _lowerCAmelCase :Optional[Any] = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Dict = self.get_clip_image_embeddings(_UpperCAmelCase , _UpperCAmelCase ) _lowerCAmelCase :Any = slerp( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
_lowerCAmelCase :int = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase :Optional[int] = content_text_input.input_ids.shape[-1] _lowerCAmelCase :Union[str, Any] = self.tokenizer([''] , padding='max_length' , max_length=_UpperCAmelCase , return_tensors='pt' ) _lowerCAmelCase :Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt _lowerCAmelCase :Optional[int] = uncond_embeddings.repeat_interleave(_UpperCAmelCase , dim=0 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _lowerCAmelCase :int = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
_lowerCAmelCase :Tuple = (batch_size, self.unet.config.in_channels, height // 8, width // 8) _lowerCAmelCase :Optional[Any] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not work reproducibly on mps _lowerCAmelCase :Any = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device='cpu' , dtype=_UpperCAmelCase ).to( self.device ) else: _lowerCAmelCase :List[Any] = torch.randn(_UpperCAmelCase , generator=_UpperCAmelCase , device=self.device , dtype=_UpperCAmelCase ) else: if latents.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) _lowerCAmelCase :int = latents.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler _lowerCAmelCase :Optional[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] _lowerCAmelCase :Any = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) _lowerCAmelCase :Any = {} if accepts_eta: _lowerCAmelCase :Any = eta # check if the scheduler accepts generator _lowerCAmelCase :List[Any] = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) if accepts_generator: _lowerCAmelCase :List[Any] = generator with self.progress_bar(total=_UpperCAmelCase ): for i, t in enumerate(_UpperCAmelCase ): # expand the latents if we are doing classifier free guidance _lowerCAmelCase :Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _lowerCAmelCase :Tuple = self.scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase ) # predict the noise residual _lowerCAmelCase :Optional[Any] = self.unet(_UpperCAmelCase , _UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase ).sample # perform classifier free guidance if do_classifier_free_guidance: _lowerCAmelCase , _lowerCAmelCase :List[str] = noise_pred.chunk(2 ) _lowerCAmelCase :Any = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # perform clip guidance if clip_guidance_scale > 0: _lowerCAmelCase :List[Any] = ( text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings ) _lowerCAmelCase , _lowerCAmelCase :List[str] = self.cond_fn( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ) # compute the previous noisy sample x_t -> x_t-1 _lowerCAmelCase :str = self.scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase ).prev_sample # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor _lowerCAmelCase :str = 1 / 0.1_8_2_1_5 * latents _lowerCAmelCase :Any = self.vae.decode(_UpperCAmelCase ).sample _lowerCAmelCase :List[str] = (image / 2 + 0.5).clamp(0 , 
1 ) _lowerCAmelCase :Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": _lowerCAmelCase :List[Any] = self.numpy_to_pil(_UpperCAmelCase ) if not return_dict: return (image, None) return StableDiffusionPipelineOutput(images=_UpperCAmelCase , nsfw_content_detected=_UpperCAmelCase )
687
0
# NOTE(review): identifiers in this block were machine-mangled (`__lowercase`,
# `snake_case__`, `_UpperCAmelCase`, `lowerCAmelCase__`, `SCREAMING_SNAKE_CASE_`).
# Bodies read names that are never bound (`images`, `audio`, `output_dict`, ...),
# so the code cannot run as written; docstrings below describe apparent intent
# only — restore the original identifiers before relying on this module.
from ...processing_utils import ProcessorMixin


class __lowercase (snake_case__ ):
    """Processor bundling an image processor and an audio feature extractor
    behind a single ``__call__`` (TVLT-style pattern).

    NOTE(review): the three class attributes below are all mangled to the same
    name, so only the last assignment survives at class-creation time —
    presumably these were ``attributes``, ``image_processor_class`` and
    ``feature_extractor_class``; verify against the original source.
    """
    _UpperCAmelCase = ['image_processor', 'feature_extractor']
    _UpperCAmelCase = 'TvltImageProcessor'
    _UpperCAmelCase = 'TvltFeatureExtractor'

    def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ):
        """Store the two sub-processors and forward them to the mixin base.

        NOTE(review): the super() call and the two assignments read names that
        do not exist in this scope (`_UpperCAmelCase`, `image_processor`,
        `feature_extractor`) — mangling artifact.
        """
        super().__init__(image_processor=_UpperCAmelCase , feature_extractor=_UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : str = image_processor
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = feature_extractor

    def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=False , *lowerCAmelCase__ , **lowerCAmelCase__ , ):
        """Dispatch image inputs to the image processor and audio inputs to the
        feature extractor, merging both outputs into one dict.

        Raises ValueError when neither an images nor an audio input is given.
        NOTE(review): the branch conditions read `images`, `images_mixed` and
        `audio`, none of which are bound under the mangled parameter names.
        """
        if images is None and audio is None:
            raise ValueError('You need to specify either an `images` or `audio` input to process.' )
        SCREAMING_SNAKE_CASE_ : str = None
        if images is not None:
            SCREAMING_SNAKE_CASE_ : List[str] = self.image_processor(_UpperCAmelCase , mask_pixel=_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        if images_mixed is not None:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor(_UpperCAmelCase , is_mixed=_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase )
        if audio is not None:
            SCREAMING_SNAKE_CASE_ : List[str] = self.feature_extractor(
                _UpperCAmelCase , *_UpperCAmelCase , sampling_rate=_UpperCAmelCase , mask_audio=_UpperCAmelCase , **_UpperCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = {}
        if audio is not None:
            output_dict.update(_UpperCAmelCase )
        if images is not None:
            output_dict.update(_UpperCAmelCase )
        if images_mixed_dict is not None:
            output_dict.update(_UpperCAmelCase )
        return output_dict

    @property
    def UpperCamelCase__ ( self ):
        """Union of both sub-processors' model input names, order-preserving and
        de-duplicated via ``dict.fromkeys``."""
        SCREAMING_SNAKE_CASE_ : Dict = self.image_processor.model_input_names
        SCREAMING_SNAKE_CASE_ : Tuple = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
101
# NOTE(review): Quine-McCluskey-style boolean minimizer whose identifiers were
# machine-mangled: every function below is named `UpperCamelCase_` (each `def`
# overwrites the previous one, so only the last survives at module level) and
# every parameter is `__magic_name__`, while bodies read names that are never
# bound (`lista`, `count`, `binary`, `chart`, `select`, ...). The code cannot
# run as written; docstrings describe apparent intent only.
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ):
    """Apparently `compare_string`: if the two bit-strings differ in exactly one
    position, return the merged string with '_' at that position; if they differ
    in more than one position, return False. NOTE(review): body reads unbound
    `lista`/`count` — mangling artifact."""
    _lowerCAmelCase :Optional[int] = list(__magic_name__ )
    _lowerCAmelCase :Dict = list(__magic_name__ )
    _lowerCAmelCase :Any = 0
    for i in range(len(__magic_name__ ) ):
        if lista[i] != lista[i]:
            count += 1
            _lowerCAmelCase :Union[str, Any] = '_'
    if count > 1:
        return False
    else:
        return "".join(__magic_name__ )


def UpperCamelCase_( __magic_name__ : list[str] ):
    """Apparently `check`: repeatedly merge adjacent implicants until no merge
    is possible, collecting unmerged terms as prime implicants (pi)."""
    _lowerCAmelCase :int = []
    while True:
        _lowerCAmelCase :str = ['$'] * len(__magic_name__ )
        _lowerCAmelCase :Optional[int] = []
        for i in range(len(__magic_name__ ) ):
            for j in range(i + 1 , len(__magic_name__ ) ):
                # compare_string merges terms differing in one bit
                _lowerCAmelCase :int = compare_string(binary[i] , binary[j] )
                if k is False:
                    _lowerCAmelCase :str = '*'
                    _lowerCAmelCase :Union[str, Any] = '*'
                    temp.append('X' )
        for i in range(len(__magic_name__ ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(__magic_name__ ) == 0:
            return pi
        _lowerCAmelCase :Any = list(set(__magic_name__ ) )


def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ):
    """Apparently `decimal_to_binary`: render each minterm as a fixed-width
    binary string (width = number of variables). NOTE(review): minterms are
    floats here, so `str(minterm % 2)` would yield '1.0'/'0.0' — likely a
    mangling-era regression from int; verify upstream."""
    _lowerCAmelCase :str = []
    for minterm in minterms:
        _lowerCAmelCase :Any = ''
        for _ in range(__magic_name__ ):
            _lowerCAmelCase :Tuple = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(__magic_name__ )
    return temp


def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ):
    """Apparently `is_for_table`: True when the implicant covers the minterm,
    i.e. the number of differing positions equals the implicant's '_' count."""
    _lowerCAmelCase :Optional[Any] = list(__magic_name__ )
    _lowerCAmelCase :List[Any] = list(__magic_name__ )
    _lowerCAmelCase :Optional[Any] = 0
    for i in range(len(__magic_name__ ) ):
        if lista[i] != lista[i]:
            count_n += 1
    return count_n == count


def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ):
    """Apparently `selection`: pick essential prime implicants (columns covered
    by exactly one row), then greedily pick rows covering the most remaining
    columns until the chart is empty."""
    _lowerCAmelCase :str = []
    _lowerCAmelCase :List[str] = [0] * len(__magic_name__ )
    # pass 1: mark rows that are the sole cover of some column (essential PIs)
    for i in range(len(chart[0] ) ):
        _lowerCAmelCase :Dict = 0
        _lowerCAmelCase :Optional[Any] = -1
        for j in range(len(__magic_name__ ) ):
            if chart[j][i] == 1:
                count += 1
                _lowerCAmelCase :List[Any] = j
        if count == 1:
            _lowerCAmelCase :Dict = 1
    # pass 2: take essential rows and zero out the columns they cover
    for i in range(len(__magic_name__ ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(__magic_name__ ) ):
                        _lowerCAmelCase :Dict = 0
            temp.append(prime_implicants[i] )
    # pass 3: greedy set cover over whatever columns remain
    while True:
        _lowerCAmelCase :Dict = 0
        _lowerCAmelCase :Any = -1
        _lowerCAmelCase :Optional[Any] = 0
        for i in range(len(__magic_name__ ) ):
            _lowerCAmelCase :str = chart[i].count(1 )
            if count_n > max_n:
                _lowerCAmelCase :Optional[Any] = count_n
                _lowerCAmelCase :Dict = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(__magic_name__ ) ):
                    _lowerCAmelCase :str = 0


def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ):
    """Apparently `prime_implicant_chart`: build the PI-by-minterm coverage
    matrix (1 where the implicant covers the minterm)."""
    _lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )]
    for i in range(len(__magic_name__ ) ):
        _lowerCAmelCase :Tuple = prime_implicants[i].count('_' )
        for j in range(len(__magic_name__ ) ):
            if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ):
                _lowerCAmelCase :str = 1
    return chart


def UpperCamelCase_( ):
    """Interactive driver: read variable count and minterms, then print the
    prime implicants and the essential prime implicants."""
    _lowerCAmelCase :Tuple = int(input('Enter the no. of variables\n' ) )
    _lowerCAmelCase :Tuple = [
        float(__magic_name__ )
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
    ]
    _lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ )
    _lowerCAmelCase :Any = check(__magic_name__ )
    print('Prime Implicants are:' )
    print(__magic_name__ )
    _lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ )
    _lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ )
    print('Essential Prime Implicants are:' )
    print(__magic_name__ )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
687
0
'''simple docstring''' import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class _lowercase : _SCREAMING_SNAKE_CASE : Optional[Union[str, Path]] = None _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : Optional[Dict] = None _SCREAMING_SNAKE_CASE : Optional[str] = None _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : bool = True _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : int = 1 _SCREAMING_SNAKE_CASE : Optional[Union[str, bool]] = None _SCREAMING_SNAKE_CASE : bool = False _SCREAMING_SNAKE_CASE : Optional[Dict] = None _SCREAMING_SNAKE_CASE : Optional[str] = None def a ( self : Optional[Any] ) -> Tuple: return self.__class__(**{k: copy.deepcopy(_UpperCAmelCase ) for k, v in self.__dict__.items()} )
56
# NOTE(review): `datasets.Metric` wrapper around TensorFlow-NMT's BLEU script.
# Identifiers were machine-mangled: the three module constants below are ALL
# named `a` (each assignment overwrites the previous one), yet the class
# decorator reads `_DESCRIPTION` / `_KWARGS_DESCRIPTION`, which are therefore
# undefined here — restore the original constant names before running.
import datasets

from .nmt_bleu import compute_bleu  # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py


# Presumably the original `_CITATION` string.
a = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """

# Presumably the original `_DESCRIPTION` string.
a = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """

# Presumably the original `_KWARGS_DESCRIPTION` string.
a = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... ] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ (datasets.Metric ):
    """BLEU metric: corpus-level n-gram precision with a brevity penalty."""

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        """Declare the metric's schema: tokenized predictions and (possibly
        multiple) tokenized references per sample."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
                    'references': datasets.Sequence(
                        datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
                } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
                'https://en.wikipedia.org/wiki/BLEU',
                'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
            ] , )

    def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ):
        """Delegate to the TF-NMT `compute_bleu` and unpack its score tuple.

        NOTE(review): all keyword values are the mangled `_UpperCAmelCase` —
        which positional argument maps to `reference_corpus` vs
        `translation_corpus` is not recoverable from this text; verify
        against the original before trusting results.
        """
        _lowerCAmelCase :Any = compute_bleu(
            reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
        ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
687
0
# NOTE(review): unit tests for `AutoFeatureExtractor`. Identifiers were
# machine-mangled: every test method is named `snake_case__` (later defs
# overwrite earlier ones on the class), the three fixture-path constants are
# all `SCREAMING_SNAKE_CASE`, and most assertion arguments are the unbound
# name `_UpperCAmelCase`. The tests cannot run as written; docstrings below
# describe apparent intent only.
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402

# NOTE(review): three distinct fixture paths collapsed onto one mangled name;
# only the last assignment survives.
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures')
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/dummy-config.json')


class A_ ( unittest.TestCase ):
    """Tests for AutoFeatureExtractor.from_pretrained resolution, error
    reporting, trust_remote_code handling and the register() API."""

    def snake_case__ ( self) -> Optional[int]:
        """Presumably setUp: resets a counter used by later tests."""
        _UpperCAmelCase : int = 0

    def snake_case__ ( self) -> Optional[int]:
        """Load a feature extractor from a hub model id."""
        _UpperCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''')
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)

    def snake_case__ ( self) -> int:
        """Load a feature extractor from a local fixture path (mangled arg)."""
        _UpperCAmelCase : Dict = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)

    def snake_case__ ( self) -> List[Any]:
        """Round-trip: config.json alone (without feature_extractor_type) must
        be enough to resolve the feature extractor class."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            _UpperCAmelCase : Union[str, Any] = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            _UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase).to_dict()
            config_dict.pop('''feature_extractor_type''')
            _UpperCAmelCase : List[str] = WavaVecaFeatureExtractor(**_UpperCAmelCase)
            # save in new folder
            model_config.save_pretrained(_UpperCAmelCase)
            config.save_pretrained(_UpperCAmelCase)
            _UpperCAmelCase : Optional[int] = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
            # make sure private variable is not incorrectly saved
            _UpperCAmelCase : Optional[int] = json.loads(config.to_json_string())
            self.assertTrue('''_processor_class''' not in dict_as_saved)
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)

    def snake_case__ ( self) -> int:
        """Load from an explicit preprocessor config file (mangled arg)."""
        _UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
        self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)

    def snake_case__ ( self) -> List[str]:
        """Unknown model id must raise with a helpful message."""
        with self.assertRaisesRegex(
            _UpperCAmelCase , '''bert-base is not a local folder and is not a valid model identifier'''):
            _UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained('''bert-base''')

    def snake_case__ ( self) -> Any:
        """Invalid revision must raise with a helpful message."""
        with self.assertRaisesRegex(
            _UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
            _UpperCAmelCase : int = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase , revision='''aaaaaa''')

    def snake_case__ ( self) -> int:
        """Repo without a preprocessor_config.json must raise."""
        with self.assertRaisesRegex(
            _UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            _UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''')

    def snake_case__ ( self) -> List[str]:
        """Remote-code extractor: loading requires trust_remote_code=True and
        the result must survive a save/load round trip."""
        with self.assertRaises(_UpperCAmelCase):
            _UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(_UpperCAmelCase):
            _UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCAmelCase)
        _UpperCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(
            '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCAmelCase)
        self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(_UpperCAmelCase)
            _UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')

    def snake_case__ ( self) -> Any:
        """register(): custom config/extractor pair becomes loadable through
        the auto-API; cleanup restores the global mappings."""
        try:
            AutoConfig.register('''custom''' , _UpperCAmelCase)
            AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(_UpperCAmelCase):
                AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase)
            # Now that the config is registered, it can be used as any other config with the auto-API
            _UpperCAmelCase : Tuple = CustomFeatureExtractor.from_pretrained(_UpperCAmelCase)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(_UpperCAmelCase)
                _UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_UpperCAmelCase)
                self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def snake_case__ ( self) -> List[str]:
        """A locally-registered extractor wins over remote code unless
        trust_remote_code=True explicitly selects the Hub version."""

        class A_ ( snake_case__ ):
            '''Local stand-in extractor; the flag marks it as the local copy.'''
            _SCREAMING_SNAKE_CASE : List[Any] = True

        try:
            AutoConfig.register('''custom''' , _UpperCAmelCase)
            AutoFeatureExtractor.register(_UpperCAmelCase , _UpperCAmelCase)
            # If remote code is not set, the default is to use local
            _UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''')
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            _UpperCAmelCase : List[Any] = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCAmelCase)
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            _UpperCAmelCase : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=_UpperCAmelCase)
            self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''')
            self.assertTrue(not hasattr(_UpperCAmelCase , '''is_local'''))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
485
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a = { """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""", """FalconForCausalLM""", """FalconModel""", """FalconPreTrainedModel""", """FalconForSequenceClassification""", """FalconForTokenClassification""", """FalconForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_falcon import ( FALCON_PRETRAINED_MODEL_ARCHIVE_LIST, FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, FalconPreTrainedModel, ) else: import sys a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
687
0
"""simple docstring""" def UpperCAmelCase ( A : Optional[Any] , A : Any ): '''simple docstring''' _UpperCAmelCase = [0 for i in range(r + 1 )] # nc0 = 1 _UpperCAmelCase = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _UpperCAmelCase = min(A , A ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
573
# NOTE(review): tests for `MobileViTImageProcessor`. Identifiers were
# machine-mangled: both classes are named `UpperCAmelCase_` (the second
# definition shadows the first, so `MobileViTImageProcessingTester` referenced
# in setUp is undefined), `__init__` repeats the parameter name
# `_UpperCAmelCase`, and bodies read names that are never bound (`size`,
# `parent`, `image_processing`, ...). The tests cannot run as written;
# docstrings describe apparent intent only.
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class UpperCAmelCase_ (unittest.TestCase ):
    """Fixture holder: records the processor kwargs the tests exercise."""

    def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ):
        # NOTE(review): every parameter is mangled to `_UpperCAmelCase`, so the
        # reads below (`size`, `crop_size`, `parent`, ...) are unbound.
        _lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20}
        _lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        _lowerCAmelCase :str = parent
        _lowerCAmelCase :List[Any] = batch_size
        _lowerCAmelCase :Optional[Any] = num_channels
        _lowerCAmelCase :Optional[Any] = image_size
        _lowerCAmelCase :int = min_resolution
        _lowerCAmelCase :List[str] = max_resolution
        _lowerCAmelCase :List[str] = do_resize
        _lowerCAmelCase :Optional[int] = size
        _lowerCAmelCase :str = do_center_crop
        _lowerCAmelCase :int = crop_size
        _lowerCAmelCase :Optional[int] = do_flip_channel_order

    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class UpperCAmelCase_ (snake_case__ , unittest.TestCase ):
    """Tests processor attributes, dict round-trips, and batching of PIL /
    numpy / torch inputs."""

    lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        """Presumably setUp: build the fixture holder above."""
        _lowerCAmelCase :Optional[Any] = MobileViTImageProcessingTester(self )

    @property
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ):
        """The processor must expose the documented config attributes."""
        _lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) )
        self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) )

    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        """from_dict honors defaults and keyword overrides for size/crop_size."""
        _lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 20} )
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
        _lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'shortest_edge': 42} )
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )

    def SCREAMING_SNAKE_CASE__ ( self: List[Any] ):
        # Intentionally empty placeholder in the original.
        pass

    def SCREAMING_SNAKE_CASE__ ( self: int ):
        """PIL inputs: single image and batch produce crop-sized pixel tensors."""
        # Initialize image_processing
        _lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , Image.Image )
        # Test not batched input
        _lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        """numpy inputs: same shape contract as the PIL case."""
        # Initialize image_processing
        _lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , np.ndarray )
        # Test not batched input
        _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )

    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        """torch inputs: same shape contract as the PIL case."""
        # Initialize image_processing
        _lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
        # Test not batched input
        _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
        # Test batched
        _lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) , )
687
0
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def __lowerCAmelCase ( A_ : str , A_ : float | Decimal , A_ : float = 10**-10 ) -> Union[str, Any]: __UpperCAmelCase = a while True: __UpperCAmelCase = Decimal(A_ ) - ( Decimal(eval(A_ ) ) / Decimal(eval(str(diff(A_ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(A_ ) ) < precision: # noqa: S307 return float(A_ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") # Find Square Root of 5 print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") # Exponential Roots print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
221
# NOTE(review): a `datasets` ArrowBasedBuilder for pickled pandas DataFrames.
# Identifiers were machine-mangled: both classes are named `UpperCAmelCase_`
# (the config dataclass is shadowed, so the `PandasConfig` read below is
# undefined), and method bodies read names that are never bound
# (`data_files`, `files`, `splits`, `pa_table`, ...). The code cannot run as
# written; docstrings describe apparent intent only.
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


@dataclass
class UpperCAmelCase_ (datasets.BuilderConfig ):
    """Builder config: optional explicit `Features` schema to cast tables to."""

    lowerCamelCase : Optional[datasets.Features] = None


class UpperCAmelCase_ (datasets.ArrowBasedBuilder ):
    """Builder that loads pickled pandas DataFrames into Arrow tables."""

    # NOTE(review): `PandasConfig` is undefined here — the config dataclass
    # above lost its name to mangling.
    lowerCamelCase : Any = PandasConfig

    def SCREAMING_SNAKE_CASE__ ( self: int ):
        """Dataset info carries only the (optional) configured features."""
        return datasets.DatasetInfo(features=self.config.features )

    def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: List[str] ):
        """Resolve config.data_files into SplitGenerators (single TRAIN split
        when a bare path/list is given, one split per key otherwise)."""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        _lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(_UpperCAmelCase , (str, list, tuple) ):
            _lowerCAmelCase :Any = data_files
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                _lowerCAmelCase :Dict = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        _lowerCAmelCase :Any = []
        for split_name, files in data_files.items():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
                _lowerCAmelCase :str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ) )
        return splits

    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: pa.Table ):
        """Cast the table to the configured schema when one is set."""
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
        return pa_table

    def SCREAMING_SNAKE_CASE__ ( self: List[str] , _UpperCAmelCase: Dict ):
        """Yield (index, Arrow table) per pickled-DataFrame file."""
        for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
            with open(_UpperCAmelCase , 'rb' ) as f:
                # NOTE(review): pd.read_pickle on untrusted files executes
                # arbitrary code — only load trusted data.
                _lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
                yield i, self._cast_table(_UpperCAmelCase )
687
0
'''simple docstring''' def lowercase_ ( __A : int ) -> Union[str, Any]: """simple docstring""" return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') SCREAMING_SNAKE_CASE = int(input('Enter number: ').strip()) print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
94
import glob import os import random from string import ascii_lowercase, digits import cva a = """""" a = """""" a = """""" a = 1 # (0 is vertical, 1 is horizontal) def UpperCamelCase_( ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ ) print('Processing...' ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ ) for index, image in enumerate(__magic_name__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCAmelCase :Optional[Any] = random_chars(32 ) _lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0] _lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" ) _lowerCAmelCase :str = [] for anno in new_annos[index]: _lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(__magic_name__ ) with open(f"""/{file_root}.txt""" , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ): """simple docstring""" _lowerCAmelCase :int = [] _lowerCAmelCase :Union[str, Any] = [] for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ): _lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' 
, 1 )[0] with open(__magic_name__ ) as in_file: _lowerCAmelCase :Union[str, Any] = in_file.readlines() _lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" ) _lowerCAmelCase :Tuple = [] for obj_list in obj_lists: _lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__magic_name__ ) labels.append(__magic_name__ ) return img_paths, labels def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ): """simple docstring""" _lowerCAmelCase :str = [] _lowerCAmelCase :Any = [] _lowerCAmelCase :Optional[Any] = [] for idx in range(len(__magic_name__ ) ): _lowerCAmelCase :Optional[int] = [] _lowerCAmelCase :Optional[Any] = img_list[idx] path_list.append(__magic_name__ ) _lowerCAmelCase :List[str] = anno_list[idx] _lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ ) if flip_type == 1: _lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ ) for bbox in img_annos: _lowerCAmelCase :List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ ) for bbox in img_annos: _lowerCAmelCase :List[str] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__magic_name__ ) new_imgs_list.append(__magic_name__ ) return new_imgs_list, new_annos_lists, path_list def UpperCamelCase_( __magic_name__ : int = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowerCAmelCase :str = ascii_lowercase + digits return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
687
0
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation def _UpperCAmelCase (UpperCamelCase_ : Any ): '''simple docstring''' _lowerCAmelCase : Optional[int] = 384 _lowerCAmelCase : str = 7 if "tiny" in model_name: _lowerCAmelCase : Optional[Any] = 96 _lowerCAmelCase : Optional[int] = (2, 2, 6, 2) _lowerCAmelCase : int = (3, 6, 12, 24) elif "small" in model_name: _lowerCAmelCase : Tuple = 96 _lowerCAmelCase : Optional[int] = (2, 2, 18, 2) _lowerCAmelCase : List[str] = (3, 6, 12, 24) elif "base" in model_name: _lowerCAmelCase : Union[str, Any] = 128 _lowerCAmelCase : List[Any] = (2, 2, 18, 2) _lowerCAmelCase : Optional[int] = (4, 8, 16, 32) _lowerCAmelCase : Dict = 12 _lowerCAmelCase : Optional[Any] = 512 elif "large" in model_name: _lowerCAmelCase : List[str] = 192 _lowerCAmelCase : str = (2, 2, 18, 2) _lowerCAmelCase : List[Any] = (6, 12, 24, 48) _lowerCAmelCase : Optional[int] = 12 _lowerCAmelCase : List[str] = 768 # set label information _lowerCAmelCase : List[str] = 150 _lowerCAmelCase : Optional[int] = 'huggingface/label-files' _lowerCAmelCase : Tuple = 'ade20k-id2label.json' _lowerCAmelCase : Union[str, Any] = json.load(open(hf_hub_download(UpperCamelCase_ , UpperCamelCase_ , repo_type="""dataset""" ) , """r""" ) ) _lowerCAmelCase : Dict = {int(UpperCamelCase_ ): v for k, v in idalabel.items()} _lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()} _lowerCAmelCase : Tuple = SwinConfig( embed_dim=UpperCamelCase_ , depths=UpperCamelCase_ , num_heads=UpperCamelCase_ , window_size=UpperCamelCase_ , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) _lowerCAmelCase : Dict = UperNetConfig( backbone_config=UpperCamelCase_ , auxiliary_in_channels=UpperCamelCase_ , num_labels=UpperCamelCase_ , idalabel=UpperCamelCase_ , labelaid=UpperCamelCase_ , ) return 
config def _UpperCAmelCase (UpperCamelCase_ : Union[str, Any] ): '''simple docstring''' _lowerCAmelCase : Tuple = [] # fmt: off # stem rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", F"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", F"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.weight", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.norm2.bias", F"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", 
F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", F"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight") ) rename_keys.append((F"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", F"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias") ) if i < 3: rename_keys.append((F"backbone.stages.{i}.downsample.reduction.weight", F"backbone.encoder.layers.{i}.downsample.reduction.weight") ) rename_keys.append((F"backbone.stages.{i}.downsample.norm.weight", F"backbone.encoder.layers.{i}.downsample.norm.weight") ) rename_keys.append((F"backbone.stages.{i}.downsample.norm.bias", F"backbone.encoder.layers.{i}.downsample.norm.bias") ) rename_keys.append((F"backbone.norm{i}.weight", F"backbone.hidden_states_norms.stage{i+1}.weight") ) rename_keys.append((F"backbone.norm{i}.bias", F"backbone.hidden_states_norms.stage{i+1}.bias") ) # decode head rename_keys.extend( [ ("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""), ("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""), ("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""), ("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""), ] ) # fmt: on return rename_keys def _UpperCAmelCase (UpperCamelCase_ : int , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int] ): '''simple docstring''' _lowerCAmelCase : List[Any] = dct.pop(UpperCamelCase_ ) _lowerCAmelCase : Union[str, Any] = val def _UpperCAmelCase (UpperCamelCase_ : List[Any] , UpperCamelCase_ : str ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _lowerCAmelCase : int = num_features[i] for j in 
range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _lowerCAmelCase : Tuple = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight" ) _lowerCAmelCase : Dict = state_dict.pop(F"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias" ) # next, add query, keys and values (in that order) to the state dict _lowerCAmelCase : Union[str, Any] = in_proj_weight[:dim, :] _lowerCAmelCase : Tuple = in_proj_bias[: dim] _lowerCAmelCase : Optional[Any] = in_proj_weight[ dim : dim * 2, : ] _lowerCAmelCase : Optional[int] = in_proj_bias[ dim : dim * 2 ] _lowerCAmelCase : Union[str, Any] = in_proj_weight[ -dim :, : ] _lowerCAmelCase : Optional[int] = in_proj_bias[-dim :] # fmt: on def _UpperCAmelCase (UpperCamelCase_ : int ): '''simple docstring''' _lowerCAmelCase : Union[str, Any] = x.shape _lowerCAmelCase : Tuple = x.reshape(UpperCamelCase_ , 4 , in_channel // 4 ) _lowerCAmelCase : str = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(UpperCamelCase_ , UpperCamelCase_ ) return x def _UpperCAmelCase (UpperCamelCase_ : int ): '''simple docstring''' _lowerCAmelCase : str = x.shape _lowerCAmelCase : Union[str, Any] = x.reshape(UpperCamelCase_ , in_channel // 4 , 4 ) _lowerCAmelCase : List[str] = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(UpperCamelCase_ , UpperCamelCase_ ) return x def _UpperCAmelCase (UpperCamelCase_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase : Optional[Any] = x.shape[0] _lowerCAmelCase : Optional[int] = x.reshape(4 , in_channel // 4 ) _lowerCAmelCase : Optional[int] = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(UpperCamelCase_ ) return x def _UpperCAmelCase (UpperCamelCase_ : List[str] ): '''simple docstring''' _lowerCAmelCase : Optional[int] = x.shape[0] _lowerCAmelCase : int = x.reshape(in_channel // 4 , 4 ) _lowerCAmelCase : Optional[int] = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(UpperCamelCase_ ) return x def 
_UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any] ): '''simple docstring''' _lowerCAmelCase : Any = { 'upernet-swin-tiny': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth', 'upernet-swin-small': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth', 'upernet-swin-base': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth', 'upernet-swin-large': 'https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth', } _lowerCAmelCase : List[Any] = model_name_to_url[model_name] _lowerCAmelCase : Union[str, Any] = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""" , file_name=UpperCamelCase_ )[ 'state_dict' ] for name, param in state_dict.items(): print(UpperCamelCase_ , param.shape ) _lowerCAmelCase : Union[str, Any] = get_upernet_config(UpperCamelCase_ ) _lowerCAmelCase : int = UperNetForSemanticSegmentation(UpperCamelCase_ ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): _lowerCAmelCase : Union[str, Any] = state_dict.pop(UpperCamelCase_ ) if "bn" in key: _lowerCAmelCase : List[str] = key.replace("""bn""" , """batch_norm""" ) _lowerCAmelCase : Optional[int] = val # rename keys _lowerCAmelCase : Optional[Any] = create_rename_keys(UpperCamelCase_ ) for src, 
dest in rename_keys: rename_key(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) read_in_q_k_v(UpperCamelCase_ , config.backbone_config ) # fix downsample parameters for key, value in state_dict.items(): if "downsample" in key: if "reduction" in key: _lowerCAmelCase : Union[str, Any] = reverse_correct_unfold_reduction_order(UpperCamelCase_ ) if "norm" in key: _lowerCAmelCase : Optional[Any] = reverse_correct_unfold_norm_order(UpperCamelCase_ ) model.load_state_dict(UpperCamelCase_ ) # verify on image _lowerCAmelCase : List[str] = 'https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg' _lowerCAmelCase : Union[str, Any] = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" ) _lowerCAmelCase : Dict = SegformerImageProcessor() _lowerCAmelCase : Dict = processor(UpperCamelCase_ , return_tensors="""pt""" ).pixel_values with torch.no_grad(): _lowerCAmelCase : Dict = model(UpperCamelCase_ ) _lowerCAmelCase : List[str] = outputs.logits print(logits.shape ) print("""First values of logits:""" , logits[0, 0, :3, :3] ) # assert values if model_name == "upernet-swin-tiny": _lowerCAmelCase : Dict = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ) elif model_name == "upernet-swin-small": _lowerCAmelCase : Any = torch.tensor( [[-7.1_921, -7.1_921, -6.9_532], [-7.1_921, -7.1_921, -6.9_532], [-7.0_908, -7.0_908, -6.8_534]] ) elif model_name == "upernet-swin-base": _lowerCAmelCase : Any = torch.tensor( [[-6.5_851, -6.5_851, -6.4_330], [-6.5_851, -6.5_851, -6.4_330], [-6.4_763, -6.4_763, -6.3_254]] ) elif model_name == "upernet-swin-large": _lowerCAmelCase : Optional[Any] = torch.tensor( [[-7.5_297, -7.5_297, -7.3_802], [-7.5_297, -7.5_297, -7.3_802], [-7.4_044, -7.4_044, -7.2_586]] ) print("""Logits:""" , outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase_ , atol=1E-4 ) 
print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase_ ) print(F"Saving processor to {pytorch_dump_folder_path}" ) processor.save_pretrained(UpperCamelCase_ ) if push_to_hub: print(F"Pushing model and processor for {model_name} to hub" ) model.push_to_hub(F"openmmlab/{model_name}" ) processor.push_to_hub(F"openmmlab/{model_name}" ) if __name__ == "__main__": _lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="upernet-swin-tiny", type=str, choices=[F'''upernet-swin-{size}''' for size in ["tiny", "small", "base", "large"]], help="Name of the Swin + UperNet model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) _lowerCamelCase : Any = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
429
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging a = logging.get_logger(__name__) def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ): """simple docstring""" _lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ ) _lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ ) return torch.mm(__magic_name__ , normalized_text_embeds.t() ) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : str = CLIPConfig lowerCamelCase : Any = ['CLIPEncoderLayer'] def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ): super().__init__(_UpperCAmelCase ) _lowerCAmelCase :Any = CLIPVisionModel(config.vision_config ) _lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase ) _lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) _lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) _lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase ) _lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ): _lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output _lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy() _lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy() _lowerCAmelCase :str = [] _lowerCAmelCase :List[Any] = image_embeds.shape[0] 
for i in range(_UpperCAmelCase ): _lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images _lowerCAmelCase :List[Any] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): _lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx] _lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item() _lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) _lowerCAmelCase :Any = 0.0_1 for concept_idx in range(len(cos_dist[0] ) ): _lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx] _lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item() _lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(_UpperCAmelCase ) result.append(_UpperCAmelCase ) _lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ): _lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output _lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase ) _lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds ) _lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images _lowerCAmelCase :Any = 0.0 _lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores 
= special_scores.round(decimals=3) _lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 ) _lowerCAmelCase :List[str] = special_care * 0.0_1 _lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) _lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) _lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
687
0
"""simple docstring""" def snake_case__ ( _lowerCamelCase ) ->Any: """simple docstring""" __lowercase : List[str] = 0 # if input_string is "aba" than new_input_string become "a|b|a" __lowercase : Any = '' __lowercase : Dict = '' # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(_lowerCamelCase ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring __lowercase : Union[str, Any] = 0, 0 # length[i] shows the length of palindromic substring with center i __lowercase : Tuple = [1 for i in range(len(_lowerCamelCase ) )] # for each character in new_string find corresponding palindromic string __lowercase : Any = 0 for j in range(len(_lowerCamelCase ) ): __lowercase : str = 1 if j > r else min(length[l + r - j] // 2, r - j + 1 ) while ( j - k >= 0 and j + k < len(_lowerCamelCase ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 __lowercase : int = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: __lowercase : str = j - k + 1 # noqa: E741 __lowercase : Optional[int] = j + k - 1 # update max_length and start position if max_length < length[j]: __lowercase : List[Any] = length[j] __lowercase : Optional[Any] = j # create that string __lowercase : Tuple = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
575
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a = 6_3_7_8_1_3_7.0 a = 6_3_5_6_7_5_2.3_1_4_2_4_5 a = 6_378_137 def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ): """simple docstring""" _lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) _lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS # Intermediate P and Q values _lowerCAmelCase :str = (b_lata + b_lata) / 2 _lowerCAmelCase :Tuple = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2) _lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2 _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2) _lowerCAmelCase :str = sin(sigma / 2 ) ** 2 _lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
687
0
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] ) @pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] ) @pytest.mark.parametrize("""revision""" , [None, """v2"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = hf_hub_url(repo_id=__SCREAMING_SNAKE_CASE , path=__SCREAMING_SNAKE_CASE , revision=__SCREAMING_SNAKE_CASE ) assert url == F"""https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(__SCREAMING_SNAKE_CASE )}"""
338
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : Dict = 'encoder-decoder' lowerCamelCase : Optional[Any] = True def __init__( self: str , **_UpperCAmelCase: int ): super().__init__(**_UpperCAmelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' ) _lowerCAmelCase :Dict = encoder_config.pop('model_type' ) _lowerCAmelCase :str = kwargs.pop('decoder' ) _lowerCAmelCase :str = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Any = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ): logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) _lowerCAmelCase :Dict = True _lowerCAmelCase :List[str] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase :Optional[int] = self.encoder.to_dict() _lowerCAmelCase :Union[str, Any] = self.decoder.to_dict() _lowerCAmelCase :List[str] = self.__class__.model_type return output
687
0
from __future__ import annotations def __UpperCamelCase ( lowercase__ : int | str ) -> int: '''simple docstring''' lowerCAmelCase_ : Dict = str(lowercase__ ) return n == n[::-1] def __UpperCamelCase ( lowercase__ : int = 1000000 ) -> Dict: '''simple docstring''' lowerCAmelCase_ : str = 0 for i in range(1 , lowercase__ ): if is_palindrome(lowercase__ ) and is_palindrome(bin(lowercase__ ).split("""b""" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
600
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ): _lowerCAmelCase :Optional[int] = parent _lowerCAmelCase :Dict = batch_size _lowerCAmelCase 
:Optional[Any] = image_size _lowerCAmelCase :Optional[Any] = patch_size _lowerCAmelCase :List[Any] = num_channels _lowerCAmelCase :Optional[int] = embed_dim _lowerCAmelCase :List[str] = hidden_sizes _lowerCAmelCase :Union[str, Any] = depths _lowerCAmelCase :int = num_heads _lowerCAmelCase :Any = window_size _lowerCAmelCase :List[Any] = mlp_ratio _lowerCAmelCase :Optional[int] = qkv_bias _lowerCAmelCase :Union[str, Any] = hidden_dropout_prob _lowerCAmelCase :Optional[int] = attention_probs_dropout_prob _lowerCAmelCase :Dict = drop_path_rate _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :Tuple = use_absolute_embeddings _lowerCAmelCase :Optional[int] = patch_norm _lowerCAmelCase :Optional[Any] = layer_norm_eps _lowerCAmelCase :Union[str, Any] = initializer_range _lowerCAmelCase :List[str] = is_training _lowerCAmelCase :str = scope _lowerCAmelCase :Optional[int] = use_labels _lowerCAmelCase :List[Any] = type_sequence_label_size _lowerCAmelCase :Union[str, Any] = encoder_stride _lowerCAmelCase :Optional[int] = out_features _lowerCAmelCase :List[str] = out_indices def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase :Dict = None if self.use_labels: _lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase :str = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self: int ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ): _lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowerCAmelCase :Optional[int] = None _lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Any = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify 
channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase :List[Any] = 1 _lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :int = model(_UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size _lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase :Optional[int] = 1 _lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = 
self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs _lowerCAmelCase :List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCamelCase : Optional[Any] = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = False lowerCamelCase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = FocalNetModelTester(self ) _lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): return def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def SCREAMING_SNAKE_CASE__ ( self: str ): pass def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase :Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Tuple = model_class(_UpperCAmelCase ) _lowerCAmelCase :Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase :int = [*signature.parameters.keys()] _lowerCAmelCase :List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _lowerCAmelCase :List[Any] = 
outputs.hidden_states _lowerCAmelCase :str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # FocalNet has a different seq_length _lowerCAmelCase :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _lowerCAmelCase :List[str] = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape _lowerCAmelCase :Optional[int] = ( reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Dict = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common() 
_lowerCAmelCase :str = 3 _lowerCAmelCase :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase :int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :List[str] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Union[str, Any] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) @slow def SCREAMING_SNAKE_CASE__ ( self: int ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase :str = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE__ ( self: Dict ): # TODO update organization 
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.default_image_processor _lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase :Dict = model(**_UpperCAmelCase ) # verify the logits _lowerCAmelCase :str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else () lowerCamelCase : str = FocalNetConfig lowerCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Any = FocalNetModelTester(self )
687
0
'''simple docstring''' import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def lowerCamelCase__ ( ): '''simple docstring''' print('Making key files...' ) make_key_files('rsa' , 1_0_2_4 ) print('Key files generation successful.' ) def lowerCamelCase__ ( __lowerCamelCase : int ): '''simple docstring''' print('Generating prime p...' ) _UpperCAmelCase : Any =rabinMiller.generate_large_prime(__lowerCamelCase ) print('Generating prime q...' ) _UpperCAmelCase : Any =rabinMiller.generate_large_prime(__lowerCamelCase ) _UpperCAmelCase : List[Any] =p * q print('Generating e that is relatively prime to (p - 1) * (q - 1)...' ) while True: _UpperCAmelCase : str =random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(__lowerCamelCase , (p - 1) * (q - 1) ) == 1: break print('Calculating d that is mod inverse of e...' ) _UpperCAmelCase : List[str] =cryptoMath.find_mod_inverse(__lowerCamelCase , (p - 1) * (q - 1) ) _UpperCAmelCase : Optional[Any] =(n, e) _UpperCAmelCase : Tuple =(n, d) return (public_key, private_key) def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : int ): '''simple docstring''' if os.path.exists(f"{name}_pubkey.txt" ) or os.path.exists(f"{name}_privkey.txt" ): print('\nWARNING:' ) print( f"\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n" 'Use a different name or delete these files and re-run this program.' ) sys.exit() _UpperCAmelCase : Optional[int] =generate_key(__lowerCamelCase ) print(f"\nWriting public key to file {name}_pubkey.txt..." ) with open(f"{name}_pubkey.txt" , 'w' ) as out_file: out_file.write(f"{key_size},{public_key[0]},{public_key[1]}" ) print(f"Writing private key to file {name}_privkey.txt..." ) with open(f"{name}_privkey.txt" , 'w' ) as out_file: out_file.write(f"{key_size},{private_key[0]},{private_key[1]}" ) if __name__ == "__main__": main()
446
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a = HfApi() a = {} # fmt: off a = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) a = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) a = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) a = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) a = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) a = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 
0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) a = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) a = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) a = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) a = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) a = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, 
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) a = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) a = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) a = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) a = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on a = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): a = UNetaDModel.from_pretrained(local_checkpoint, 
subfolder="""unet""") else: a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
687
0
import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: lowerCAmelCase__ : int =None lowerCAmelCase__ : List[str] =logging.get_logger(__name__) lowerCAmelCase__ : Optional[int] ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ : str ={ 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 lowerCAmelCase__ : Any ={ 't5-small': 5_12, 't5-base': 5_12, 't5-large': 5_12, 't5-3b': 5_12, 't5-11b': 5_12, } class __lowercase (snake_case__ ): """simple docstring""" _UpperCAmelCase = VOCAB_FILES_NAMES _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _UpperCAmelCase = ['input_ids', 'attention_mask'] _UpperCAmelCase = TaTokenizer _UpperCAmelCase = [] def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__=1_0_0 , lowerCAmelCase__=None , **lowerCAmelCase__ , ): """simple docstring""" if 
extra_ids > 0 and additional_special_tokens is None: SCREAMING_SNAKE_CASE_ : Tuple = [F'''<extra_id_{i}>''' for i in range(_UpperCAmelCase )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens SCREAMING_SNAKE_CASE_ : Optional[Any] = len(set(filter(lambda lowerCAmelCase__ : bool('extra_id_' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) ) if extra_tokens != extra_ids: raise ValueError( F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids' ' tokens' ) super().__init__( _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , ) SCREAMING_SNAKE_CASE_ : str = vocab_file SCREAMING_SNAKE_CASE_ : Optional[Any] = False if not self.vocab_file else True SCREAMING_SNAKE_CASE_ : Union[str, Any] = extra_ids @staticmethod def UpperCamelCase__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: SCREAMING_SNAKE_CASE_ : Tuple = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( 'This tokenizer was incorrectly instantiated with a model max length of' F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this''' ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with' ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on' F''' {pretrained_model_name_or_path} automatically truncating your input to''' F''' {deprecated_max_model_length} when 
padding/encoding.\n- If you want to encode/pad to sequences''' F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with''' ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please' ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , _UpperCAmelCase , ) return max_model_length def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.' ) if not os.path.isdir(_UpperCAmelCase ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join( _UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ): copyfile(self.vocab_file , _UpperCAmelCase ) logger.info(F'''Copy vocab file to {out_vocab_file}''' ) return (out_vocab_file,) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: SCREAMING_SNAKE_CASE_ : List[Any] = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def UpperCamelCase__ ( self ): """simple docstring""" return list( set(filter(lambda lowerCAmelCase__ : bool(re.search(r'<extra_id_\d+>' , _UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) ) def UpperCamelCase__ 
( self ): """simple docstring""" return [self.convert_tokens_to_ids(_UpperCAmelCase ) for token in self.get_sentinel_tokens()]
101
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for the summarization preprocessing helpers.

    Bug fixed: every method was named identically, so later ``def``s shadowed
    earlier ones — only one test survived and the fixture never ran. Methods
    are restored to ``setUp`` plus distinct ``test_*`` names so unittest/pytest
    discover and run all of them.
    """

    def setUp(self):
        # Target length used by all truncate_or_pad tests.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad a sequence shorter than the block size with the pad value (0)."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """A sequence of exactly block_size is returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """A sequence longer than block_size is truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields an empty summary."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields empty article and summary line lists."""
        raw_story = ''
        article_lines, summary_lines = process_story(raw_story)
        self.assertEqual(article_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """Sentences lacking a final period get one appended; highlights become the summary."""
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        article_lines, summary_lines = process_story(raw_story)
        expected_article_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_article_lines, article_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        """With pad id 0 and no pad tokens present, the mask is all ones."""
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        """Trailing pad tokens (id 23) are masked out with zeros."""
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        """pad id 1: only trailing 1s count as padding, not a leading token."""
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        """Segment ids flip at every separator token (id 101)."""
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
687
0
"""Exponential Moving Average (EMA) utilities for model training."""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union

import numpy as np
import torch

from .utils import deprecate, is_transformers_available


if is_transformers_available():
    import transformers


def _a(seed: int) -> None:
    """Seed all relevant RNGs (random, numpy, torch CPU and CUDA) for reproducibility.

    Bug fixed: the return annotation referenced ``List`` which is not imported;
    the function returns nothing, so the annotation is now ``None``.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available


class _lowercase:
    """Exponential Moving Average of model parameters.

    Bugs fixed: every method was named ``a`` (each ``def`` shadowed the
    previous one, leaving only a single usable method) and all parameters
    shared one name (a SyntaxError); bodies referenced the intended names
    (``parameters``, ``decay``, ``shadow_params``, ...) which were never
    bound. Canonical names are restored throughout.
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        """Track an EMA of ``parameters``.

        decay: maximum EMA decay rate; min_decay: floor for the warmup decay;
        update_after_step: number of steps to skip before starting updates;
        use_ema_warmup / inv_gamma / power: warmup schedule
        ``1 - (1 + step/inv_gamma) ** -power``.
        """
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage`',
                '1.0.0',
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()
            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get('max_value', None) is not None:
            deprecation_message = 'The `max_value` argument is deprecated. Please use `decay` instead.'
            deprecate('max_value', '1.0.0', deprecation_message, standard_warn=False)
            decay = kwargs['max_value']

        if kwargs.get('min_value', None) is not None:
            deprecation_message = 'The `min_value` argument is deprecated. Please use `min_decay` instead.'
            deprecate('min_value', '1.0.0', deprecation_message, standard_warn=False)
            min_decay = kwargs['min_value']

        parameters = list(parameters)
        # Shadow copies of the parameters; these hold the averaged weights.
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get('device', None) is not None:
            deprecation_message = 'The `device` argument is deprecated. Please use `to` instead.'
            deprecate('device', '1.0.0', deprecation_message, standard_warn=False)
            self.to(device=kwargs['device'])

        self.temp_stored_params = None
        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`
        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "_lowercase":
        """Load an EMA model: instantiate ``model_cls`` from ``path`` and restore EMA state."""
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)
        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)
        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        """Save the EMA weights via the tracked model class's ``save_pretrained``."""
        if self.model_cls is None:
            raise ValueError('`save_pretrained` can only be used if `model_cls` was defined at __init__.')
        if self.model_config is None:
            raise ValueError('`save_pretrained` can only be used if `model_config` was defined at __init__.')
        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        # shadow_params are written as the model weights, not config entries.
        state_dict.pop('shadow_params', None)
        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        """Compute the decay factor for the exponential moving average at this step."""
        step = max(0, optimization_step - self.update_after_step - 1)
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        """Update the shadow parameters towards ``parameters`` with the scheduled decay."""
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                'Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. '
                'Please pass the parameters of the module instead.'
            )
            deprecate(
                'passing a `torch.nn.Module` to `ExponentialMovingAverage.step`',
                '1.0.0',
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)
        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                # Gather the full (ZeRO-3 partitioned) parameter before updating.
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Copy the averaged (shadow) weights into ``parameters``."""
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        """Move the shadow parameters to ``device``/``dtype`` (dtype only for float tensors)."""
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        """Return the EMA state (schedule settings plus shadow parameters)."""
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Temporarily stash a CPU copy of ``parameters`` for later ``restore()``."""
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        """Restore parameters stashed by ``store()`` and drop the stash."""
        if self.temp_stored_params is None:
            raise RuntimeError('This ExponentialMovingAverage has no `store()`ed weights ' 'to `restore()`')
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        """Load EMA state produced by ``state_dict()``, validating every field."""
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get('decay', self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError('Decay must be between 0 and 1')

        self.min_decay = state_dict.get('min_decay', self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError('Invalid min_decay')

        self.optimization_step = state_dict.get('optimization_step', self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError('Invalid optimization_step')

        self.update_after_step = state_dict.get('update_after_step', self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError('Invalid update_after_step')

        self.use_ema_warmup = state_dict.get('use_ema_warmup', self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError('Invalid use_ema_warmup')

        self.inv_gamma = state_dict.get('inv_gamma', self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError('Invalid inv_gamma')

        self.power = state_dict.get('power', self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError('Invalid power')

        shadow_params = state_dict.get('shadow_params', None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError('shadow_params must be a list')
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError('shadow_params must all be Tensors')
56
def UpperCamelCase_( __magic_name__ : int ): """simple docstring""" return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("""Program to check whether a number is a Perfect number or not...""") a = int(input("""Enter number: """).strip()) print(F'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
687
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Slow tokenizer cannot be used without sentencepiece installed.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# Bug fixed: these module-level constants were all assigned to a single
# reused name, leaving VOCAB_FILES_NAMES etc. undefined where the class
# references them; the base class and method names were likewise collapsed
# to one identifier so the overrides never bound. Canonical names restored.
VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
        ),
    },
    'tokenizer_file': {
        'google/bigbird-roberta-base': (
            'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
        ),
        'google/bigbird-roberta-large': (
            'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
        ),
        'google/bigbird-base-trivia-itc': (
            'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'google/bigbird-roberta-base': 4096,
    'google/bigbird-roberta-large': 4096,
    'google/bigbird-base-trivia-itc': 4096,
}

SPIECE_UNDERLINE = '▁'


class A_(PreTrainedTokenizerFast):
    """Fast BigBird tokenizer backed by HuggingFace *tokenizers*.

    Wraps a pre-trained sentencepiece-based vocabulary and adds the
    [CLS] ... [SEP] special-token formatting used by BigBird.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token='<unk>',
        bos_token='<s>',
        eos_token='</s>',
        pad_token='<pad>',
        sep_token='[SEP]',
        mask_token='[MASK]',
        cls_token='[CLS]',
        **kwargs,
    ):
        # Wrap plain-string special tokens as AddedToken with no stripping.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Build model input as [CLS] A [SEP] (pair: [CLS] A [SEP] B [SEP])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.'
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.'
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
485
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    """A polynomial with real coefficients, stored lowest degree first.

    Bugs fixed: the obfuscated version gave every parameter/method the same
    name (SyntaxError / method shadowing), and bodies read locals
    (``coefficients``, ``polynomial``) that were never bound; the class also
    referenced ``Polynomial`` internally while being defined under another
    name. Canonical names are restored throughout.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """coefficients[i] is the coefficient of x**i; exactly degree+1 entries."""
        if len(coefficients) != degree + 1:
            raise ValueError('The number of coefficients should be equal to the degree + 1.')
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: Polynomial) -> Polynomial:
        """Return the sum; iterate over the shorter coefficient list."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: Polynomial) -> Polynomial:
        # a - b == a + (-1) * b
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: Polynomial) -> Polynomial:
        """Convolution of the two coefficient sequences."""
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += self.coefficients[i] * polynomial_a.coefficients[j]
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at ``substitution``."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        """Human-readable form, highest degree first; zero terms omitted."""
        polynomial = ''
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += ' + '
            else:
                polynomial += ' - '
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + 'x'
            else:
                polynomial += str(abs(self.coefficients[i])) + 'x^' + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the first derivative as a new Polynomial of degree-1."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """Return the antiderivative with integration constant ``constant``."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a: object) -> bool:
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
687
0
"""simple docstring""" def UpperCAmelCase ( A : int = 3 , A : int = 7 , A : int = 100_0000 ): '''simple docstring''' _UpperCAmelCase = 0 _UpperCAmelCase = 1 for current_denominator in range(1 , limit + 1 ): _UpperCAmelCase = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: _UpperCAmelCase = current_numerator _UpperCAmelCase = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_00_00_00))
573
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available a = { """configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ """GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""", """GPTNeoForCausalLM""", """GPTNeoForQuestionAnswering""", """GPTNeoForSequenceClassification""", """GPTNeoForTokenClassification""", """GPTNeoModel""", """GPTNeoPreTrainedModel""", """load_tf_weights_in_gpt_neo""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a = [ """FlaxGPTNeoForCausalLM""", """FlaxGPTNeoModel""", """FlaxGPTNeoPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neo import ( GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoForCausalLM, GPTNeoForQuestionAnswering, GPTNeoForSequenceClassification, GPTNeoForTokenClassification, GPTNeoModel, GPTNeoPreTrainedModel, load_tf_weights_in_gpt_neo, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel else: import sys a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
687
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure for the ConvBERT model.
# Bug fixed: the dict and the backend symbol lists were all assigned to one
# reused variable, clobbering the dict, and the final _LazyModule call
# discarded its result instead of replacing the module in sys.modules.
_import_structure = {
    "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
    "tokenization_convbert": ["ConvBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fast tokenizer requires the `tokenizers` library.
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only exposed when torch is installed.
    _import_structure["modeling_convbert"] = [
        "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvBertForMaskedLM",
        "ConvBertForMultipleChoice",
        "ConvBertForQuestionAnswering",
        "ConvBertForSequenceClassification",
        "ConvBertForTokenClassification",
        "ConvBertLayer",
        "ConvBertModel",
        "ConvBertPreTrainedModel",
        "load_tf_weights_in_convbert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only exposed when tensorflow is installed.
    _import_structure["modeling_tf_convbert"] = [
        "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFConvBertForMaskedLM",
        "TFConvBertForMultipleChoice",
        "TFConvBertForQuestionAnswering",
        "TFConvBertForSequenceClassification",
        "TFConvBertForTokenClassification",
        "TFConvBertLayer",
        "TFConvBertModel",
        "TFConvBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
    from .tokenization_convbert import ConvBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_convbert_fast import ConvBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convbert import (
            CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvBertForMaskedLM,
            ConvBertForMultipleChoice,
            ConvBertForQuestionAnswering,
            ConvBertForSequenceClassification,
            ConvBertForTokenClassification,
            ConvBertLayer,
            ConvBertModel,
            ConvBertPreTrainedModel,
            load_tf_weights_in_convbert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convbert import (
            TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFConvBertForMaskedLM,
            TFConvBertForMultipleChoice,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertLayer,
            TFConvBertModel,
            TFConvBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
221
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ): """simple docstring""" _lowerCAmelCase :Optional[Any] = a while True: _lowerCAmelCase :str = Decimal(__magic_name__ ) - ( Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__magic_name__ ) ) < precision: # noqa: S307 return float(__magic_name__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''') # Find Square Root of 5 print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''') # Exponential Roots print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
687
0
"""Tests for ``datasets.utils.patching.patch_submodule``.

Bugs fixed: every test function shared one obfuscated name (so earlier
defs were shadowed and none were pytest-discoverable), and the mock
strings were bound to a different name than the one the assertions read
(NameError). Canonical ``test_*`` names and ``mock*`` locals restored.
"""
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    """Patching os.path.join must patch every access path and nothing else."""
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = '__test_patch_submodule_mock__'
    with patch_submodule(_test_patching, 'os.path.join', mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched
        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock
        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock
        # check join
        assert _test_patching.join is mock
        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname
        # Even renamed modules or objects must be patched
        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock
        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock
        # check renamed_join
        assert _test_patching.renamed_join is mock
        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everthing is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    """A builtin (open) referenced in the target's globals can be patched."""
    assert _test_patching.open is open

    mock = '__test_patch_submodule_builtin_mock__'
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, 'open', mock):
        assert _test_patching.open is mock

    # check that everthing is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    """Patching an attribute of a module the target never imported is a no-op."""
    mock = '__test_patch_submodule_missing_mock__'
    with patch_submodule(_test_patching, 'pandas.read_csv', mock):
        pass


def test_patch_submodule_missing_builtin():
    """Patching a builtin the target never references only adds the attribute."""
    mock = '__test_patch_submodule_missing_builtin_mock__'
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, 'len', None) is None
    with patch_submodule(_test_patching, 'len', mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    """The patch object also works via explicit start()/stop()."""
    mock = '__test_patch_submodule_start_and_stop_mock__'
    patch = patch_submodule(_test_patching, 'open', mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    """Nested patches of several attributes compose in any order and unwind cleanly."""
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = '__test_patch_submodule_successive_join__'
    mock_dirname = '__test_patch_submodule_successive_dirname__'
    mock_rename = '__test_patch_submodule_successive_rename__'
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, 'os.path.join', mock_join):
        with patch_submodule(_test_patching, 'os.rename', mock_rename):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, 'os.rename', mock_rename):
        with patch_submodule(_test_patching, 'os.path.join', mock_join):
            with patch_submodule(_test_patching, 'os.path.dirname', mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    """Patching entirely nonexistent modules/attributes must not raise."""
    mock = '__test_patch_submodule_doesnt_exist_mock__'
    with patch_submodule(_test_patching, '__module_that_doesn_exist__.__attribute_that_doesn_exist__', mock):
        pass
    with patch_submodule(_test_patching, 'os.__attribute_that_doesn_exist__', mock):
        pass
94
"""Convert an OpenAI consistency-model checkpoint (``*.pt``) into a diffusers
``ConsistencyModelPipeline`` (``UNet2DModel`` + ``CMStochasticIterativeScheduler``)."""
import argparse
import os

import torch

from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)

# Small U-Net used only by the test checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Class-conditional ImageNet-64 U-Net.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Unconditional LSUN-256 (bedroom / cat) U-Net.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler settings: consistency distillation (cd) and the two
# consistency-training (ct) variants.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}


def str2bool(v):
    """Parse an argparse string into a bool (accepts yes/no/true/false/t/f/y/n/1/0).

    Raises argparse.ArgumentTypeError for anything else.
    """
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")


def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one ResNet block from the original layout (``in_layers`` /
    ``emb_layers`` / ``out_layers``) into the diffusers layout under
    ``new_prefix``. ``has_skip`` copies the 1x1 skip-connection conv as well.
    Returns ``new_checkpoint`` (mutated in place)."""
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint


def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim=None):
    """Split the original fused ``qkv`` projection into diffusers' separate
    to_q/to_k/to_v linears (the trailing 1x1-conv axes are squeezed away) and
    copy norm + output projection. Returns ``new_checkpoint``."""
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)
    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]
    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint


def con_pt_to_diffuser(checkpoint_path, unet_config):
    """Load the original ``.pt`` checkpoint and remap every tensor into the
    diffusers ``UNet2DModel`` state-dict layout described by ``unet_config``."""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1  # input_blocks.0 is conv_in, so real blocks start at 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        # a channel change means the first resnet of the block has a skip conv
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1
        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        # every down block except the last one ends in a resnet downsampler
        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1
            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()
    args.class_cond = str2bool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config from the checkpoint file name
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config from the checkpoint file name
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
687
0
"""Packaged AudioFolder dataset builder: loads audio files (and optional
labels/metadata) from a directory tree via the generic FolderBasedBuilder."""
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    # None means "let the builder infer from the directory structure"
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


# File extensions the builder recognizes as audio.
AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
429
"""Tests for utils/check_copies.py: code annotated with `# Copied from ...`
must stay consistent with the code it was copied from."""
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = """ \"\"\"
    Output class for the scheduler's step function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample (x_{0}) based on the model output from the current timestep.
            `pred_original_sample` can be used to preview progress or for guidance.
    \"\"\"

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
"""


class UpperCAmelCase_(unittest.TestCase):
    """Exercises find_code_in_diffusers / is_copy_consistent against a scratch
    copy of the source tree so the real tree is never touched."""

    def setUp(self):
        # Scratch tree holding only scheduling_ddpm.py; check_copies is
        # redirected to it for the duration of each test.
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        # Point check_copies back at the real tree and drop the scratch dir.
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Write `comment` + a fake class to a temp file, run is_copy_consistent
        on it, and (when `overwrite_result` is given) check the overwritten file
        matches the expected text."""
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        # NOTE(review): the original targeted `black.TargetVersion.PYaa`, a
        # corrupted member name; PY37 matches the upstream test — confirm.
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
687
0
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() __A : Union[str, Any] = logging.get_logger() @dataclass class lowerCAmelCase__ : """simple docstring""" __UpperCAmelCase : nn.Module __UpperCAmelCase : List[nn.Module] = field(default_factory=snake_case__ ) __UpperCAmelCase : list = field(default_factory=snake_case__ ) def snake_case ( self : List[str] , lowercase__ : Optional[int] , lowercase__ : Tensor , lowercase__ : Tensor ): __lowercase : Optional[int] = len(list(m.modules() ) ) == 1 or isinstance(_UpperCAmelCase , nn.Convad ) or isinstance(_UpperCAmelCase , nn.BatchNormad ) if has_not_submodules: self.traced.append(_UpperCAmelCase ) def __call__( self : Optional[int] , lowercase__ : Tensor ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(_UpperCAmelCase ) [x.remove() for x in self.handles] return self @property def snake_case ( self : Any ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowercase__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase__ : """simple docstring""" __UpperCAmelCase : nn.Module __UpperCAmelCase : nn.Module __UpperCAmelCase : int = 1 __UpperCAmelCase : List = field(default_factory=snake_case__ ) __UpperCAmelCase : List = field(default_factory=snake_case__ ) __UpperCAmelCase : bool = 
True def __call__( self : str , lowercase__ : Tensor ): __lowercase : Union[str, Any] = Tracker(self.dest )(_UpperCAmelCase ).parametrized __lowercase : Dict = Tracker(self.src )(_UpperCAmelCase ).parametrized __lowercase : str = list(filter(lambda lowercase__ : type(_UpperCAmelCase ) not in self.src_skip , _UpperCAmelCase ) ) __lowercase : Any = list(filter(lambda lowercase__ : type(_UpperCAmelCase ) not in self.dest_skip , _UpperCAmelCase ) ) if len(_UpperCAmelCase ) != len(_UpperCAmelCase ) and self.raise_if_mismatch: raise Exception( f'Numbers of operations are different. Source module has {len(_UpperCAmelCase )} operations while' f' destination module has {len(_UpperCAmelCase )}.' ) for dest_m, src_m in zip(_UpperCAmelCase , _UpperCAmelCase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}' ) class lowerCAmelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , lowercase__ : nn.Module ): super().__init__() __lowercase : List[Tuple[str, nn.Module]] = [] # - get the stem feature_blocks.append(("conv1", model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("block" ), f'Unexpected layer name {k}' __lowercase : int = len(_UpperCAmelCase ) + 1 feature_blocks.append((f'res{block_index}', v) ) __lowercase : Tuple = nn.ModuleDict(_UpperCAmelCase ) def snake_case ( self : str , lowercase__ : Tensor ): return get_trunk_forward_outputs( _UpperCAmelCase , out_feat_keys=_UpperCAmelCase , feature_blocks=self._feature_blocks , ) class lowerCAmelCase__ ( snake_case__ ): """simple docstring""" def snake_case ( self : int , lowercase__ : str ): __lowercase : Any = x.split("-" ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Any , lowercase__ : str ): # default to timm! 
if x not in self: __lowercase : List[str] = self.convert_name_to_timm(_UpperCAmelCase ) __lowercase : Dict = partial(lambda: (timm.create_model(_UpperCAmelCase , pretrained=_UpperCAmelCase ).eval(), None) ) else: __lowercase : Any = super().__getitem__(_UpperCAmelCase ) return val class lowerCAmelCase__ ( snake_case__ ): """simple docstring""" def __getitem__( self : Optional[Any] , lowercase__ : str ): if "seer" in x and "in1k" not in x: __lowercase : Dict = RegNetModel else: __lowercase : Optional[Any] = RegNetForImageClassification return val def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase ) ->Dict: """simple docstring""" for from_key, to_key in keys: __lowercase : str = from_state_dict[from_key].clone() print(F'Copied key={from_key} to={to_key}' ) return to_state_dict def snake_case__ ( _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase = True, ) ->List[str]: """simple docstring""" print(F'Converting {name}...' ) with torch.no_grad(): __lowercase : List[Any] = from_model_func() __lowercase : Tuple = our_model_func(_lowerCamelCase ).eval() __lowercase : Dict = ModuleTransfer(src=_lowerCamelCase, dest=_lowerCamelCase, raise_if_mismatch=_lowerCamelCase ) __lowercase : List[Any] = torch.randn((1, 3, 2_24, 2_24) ) module_transfer(_lowerCamelCase ) if from_state_dict is not None: __lowercase : str = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: __lowercase : int = [('0.clf.0.weight', 'classifier.1.weight'), ('0.clf.0.bias', 'classifier.1.bias')] __lowercase : Optional[Any] = manually_copy_vissl_head(_lowerCamelCase, our_model.state_dict(), _lowerCamelCase ) our_model.load_state_dict(_lowerCamelCase ) __lowercase : Tuple = our_model(_lowerCamelCase, output_hidden_states=_lowerCamelCase ) __lowercase : Any = ( our_outputs.logits if isinstance(_lowerCamelCase, _lowerCamelCase ) else our_outputs.last_hidden_state ) __lowercase : List[Any] = 
from_model(_lowerCamelCase ) __lowercase : Union[str, Any] = from_output[-1] if type(_lowerCamelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: __lowercase : Optional[Any] = our_outputs.hidden_states[-1] assert torch.allclose(_lowerCamelCase, _lowerCamelCase ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name, commit_message="Add model", use_temp_dir=_lowerCamelCase, ) __lowercase : Optional[int] = 2_24 if 'seer' not in name else 3_84 # we can use the convnext one __lowercase : Union[str, Any] = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=_lowerCamelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name, commit_message="Add image processor", use_temp_dir=_lowerCamelCase, ) print(F'Pushed {name}' ) def snake_case__ ( _lowerCamelCase, _lowerCamelCase = None, _lowerCamelCase = True ) ->Dict: """simple docstring""" __lowercase : int = 'imagenet-1k-id2label.json' __lowercase : Tuple = 10_00 __lowercase : List[str] = (1, num_labels) __lowercase : Any = 'huggingface/label-files' __lowercase : Dict = num_labels __lowercase : Dict = json.load(open(cached_download(hf_hub_url(_lowerCamelCase, _lowerCamelCase, repo_type="dataset" ) ), "r" ) ) __lowercase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()} __lowercase : str = idalabel __lowercase : Tuple = {v: k for k, v in idalabel.items()} __lowercase : int = partial(_lowerCamelCase, num_labels=_lowerCamelCase, idalabel=_lowerCamelCase, labelaid=_lowerCamelCase ) __lowercase : Dict = { 'regnet-x-002': ImageNetPreTrainedConfig( depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 1_52, 3_68], groups_width=8, layer_type="x" ), 'regnet-x-004': ImageNetPreTrainedConfig( depths=[1, 2, 7, 12], hidden_sizes=[32, 64, 1_60, 3_84], 
groups_width=16, layer_type="x" ), 'regnet-x-006': ImageNetPreTrainedConfig( depths=[1, 3, 5, 7], hidden_sizes=[48, 96, 2_40, 5_28], groups_width=24, layer_type="x" ), 'regnet-x-008': ImageNetPreTrainedConfig( depths=[1, 3, 7, 5], hidden_sizes=[64, 1_28, 2_88, 6_72], groups_width=16, layer_type="x" ), 'regnet-x-016': ImageNetPreTrainedConfig( depths=[2, 4, 10, 2], hidden_sizes=[72, 1_68, 4_08, 9_12], groups_width=24, layer_type="x" ), 'regnet-x-032': ImageNetPreTrainedConfig( depths=[2, 6, 15, 2], hidden_sizes=[96, 1_92, 4_32, 10_08], groups_width=48, layer_type="x" ), 'regnet-x-040': ImageNetPreTrainedConfig( depths=[2, 5, 14, 2], hidden_sizes=[80, 2_40, 5_60, 13_60], groups_width=40, layer_type="x" ), 'regnet-x-064': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[1_68, 3_92, 7_84, 16_24], groups_width=56, layer_type="x" ), 'regnet-x-080': ImageNetPreTrainedConfig( depths=[2, 5, 15, 1], hidden_sizes=[80, 2_40, 7_20, 19_20], groups_width=1_20, layer_type="x" ), 'regnet-x-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[2_24, 4_48, 8_96, 22_40], groups_width=1_12, layer_type="x" ), 'regnet-x-160': ImageNetPreTrainedConfig( depths=[2, 6, 13, 1], hidden_sizes=[2_56, 5_12, 8_96, 20_48], groups_width=1_28, layer_type="x" ), 'regnet-x-320': ImageNetPreTrainedConfig( depths=[2, 7, 13, 1], hidden_sizes=[3_36, 6_72, 13_44, 25_20], groups_width=1_68, layer_type="x" ), # y variant 'regnet-y-002': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 1_52, 3_68], groups_width=8 ), 'regnet-y-004': ImageNetPreTrainedConfig( depths=[1, 3, 6, 6], hidden_sizes=[48, 1_04, 2_08, 4_40], groups_width=8 ), 'regnet-y-006': ImageNetPreTrainedConfig( depths=[1, 3, 7, 4], hidden_sizes=[48, 1_12, 2_56, 6_08], groups_width=16 ), 'regnet-y-008': ImageNetPreTrainedConfig( depths=[1, 3, 8, 2], hidden_sizes=[64, 1_28, 3_20, 7_68], groups_width=16 ), 'regnet-y-016': ImageNetPreTrainedConfig( depths=[2, 6, 17, 2], hidden_sizes=[48, 1_20, 3_36, 8_88], 
groups_width=24 ), 'regnet-y-032': ImageNetPreTrainedConfig( depths=[2, 5, 13, 1], hidden_sizes=[72, 2_16, 5_76, 15_12], groups_width=24 ), 'regnet-y-040': ImageNetPreTrainedConfig( depths=[2, 6, 12, 2], hidden_sizes=[1_28, 1_92, 5_12, 10_88], groups_width=64 ), 'regnet-y-064': ImageNetPreTrainedConfig( depths=[2, 7, 14, 2], hidden_sizes=[1_44, 2_88, 5_76, 12_96], groups_width=72 ), 'regnet-y-080': ImageNetPreTrainedConfig( depths=[2, 4, 10, 1], hidden_sizes=[1_68, 4_48, 8_96, 20_16], groups_width=56 ), 'regnet-y-120': ImageNetPreTrainedConfig( depths=[2, 5, 11, 1], hidden_sizes=[2_24, 4_48, 8_96, 22_40], groups_width=1_12 ), 'regnet-y-160': ImageNetPreTrainedConfig( depths=[2, 4, 11, 1], hidden_sizes=[2_24, 4_48, 12_32, 30_24], groups_width=1_12 ), 'regnet-y-320': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[2_32, 6_96, 13_92, 37_12], groups_width=2_32 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 'regnet-y-320-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[2_32, 6_96, 13_92, 37_12], groups_width=2_32 ), 'regnet-y-640-seer': RegNetConfig(depths=[2, 5, 12, 1], hidden_sizes=[3_28, 9_84, 19_68, 49_20], groups_width=3_28 ), 'regnet-y-1280-seer': RegNetConfig( depths=[2, 7, 17, 1], hidden_sizes=[5_28, 10_56, 29_04, 73_92], groups_width=2_64 ), 'regnet-y-2560-seer': RegNetConfig( depths=[3, 7, 16, 1], hidden_sizes=[6_40, 16_96, 25_44, 50_88], groups_width=6_40 ), 'regnet-y-10b-seer': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80], groups_width=10_10 ), # finetuned on imagenet 'regnet-y-320-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[2_32, 6_96, 13_92, 37_12], groups_width=2_32 ), 'regnet-y-640-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 5, 12, 1], hidden_sizes=[3_28, 9_84, 19_68, 49_20], groups_width=3_28 ), 'regnet-y-1280-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[5_28, 10_56, 29_04, 73_92], groups_width=2_64 ), 
'regnet-y-2560-seer-in1k': ImageNetPreTrainedConfig( depths=[3, 7, 16, 1], hidden_sizes=[6_40, 16_96, 25_44, 50_88], groups_width=6_40 ), 'regnet-y-10b-seer-in1k': ImageNetPreTrainedConfig( depths=[2, 7, 17, 1], hidden_sizes=[20_20, 40_40, 1_11_10, 2_82_80], groups_width=10_10 ), } __lowercase : Tuple = NameToOurModelFuncMap() __lowercase : Dict = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(_lowerCamelCase, _lowerCamelCase ) -> Tuple[nn.Module, Dict]: __lowercase : Optional[int] = torch.hub.load_state_dict_from_url(_lowerCamelCase, model_dir=str(_lowerCamelCase ), map_location="cpu" ) __lowercase : Any = model_func() # check if we have a head, if yes add it __lowercase : Tuple = files['classy_state_dict']['base_model']['model'] __lowercase : Dict = model_state_dict['trunk'] model.load_state_dict(_lowerCamelCase ) return model.eval(), model_state_dict["heads"] # pretrained __lowercase : List[Any] = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) __lowercase : Tuple = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) __lowercase : List[Any] = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) __lowercase : Optional[int] = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch", lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=10_10, w_a=17_44, w_a=6_2_0.8_3, w_m=2.5_2 ) ) ), ) # IN1K finetuned __lowercase : int = partial( _lowerCamelCase, 
"https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) __lowercase : Dict = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaagf() ), ) __lowercase : List[Any] = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch", lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ), ) __lowercase : str = partial( _lowerCamelCase, "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch", lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27, group_width=10_10, w_a=17_44, w_a=6_2_0.8_3, w_m=2.5_2 ) ) ), ) if model_name: convert_weight_and_push( _lowerCamelCase, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], names_to_config[model_name], _lowerCamelCase, _lowerCamelCase, ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _lowerCamelCase, names_to_from_model_map[model_name], names_to_ours_model_map[model_name], _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, ) return config, expected_shape if __name__ == "__main__": __A : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help=( 'The name of the model you wish to convert, it must be one of the supported regnet* architecture,' ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.' 
), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=Path, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=True, type=bool, required=False, help='If True, push model and image processor to the hub.', ) __A : List[Any] = parser.parse_args() __A : Any = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
575
from dataclasses import dataclass, field
from typing import Optional


# NOTE(review): identifier obfuscation has destroyed most names in this file:
# every class below is named `UpperCAmelCase_` (so later classes shadow earlier
# ones at module level) and every attribute is named `lowerCamelCase` (so within
# each class, later field definitions overwrite earlier ones). Several defaults
# reference the undefined name `snake_case__` (presumably `None`/`True`/`False`
# originally — TODO confirm against the upstream codeparrot examples). The
# `metadata['help']` strings are the only surviving record of each field's
# intent; the docstrings below are reconstructed from them.
@dataclass
class UpperCAmelCase_:
    """Training configuration: model/dataset names, batch sizes, optimizer and
    LR-schedule settings, checkpointing — presumably `TrainingArguments`."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be trained.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='./', metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path of training dataset.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'}
    )
    lowerCamelCase : Optional[int] = field(default=2, metadata={'help': 'Batch size for training.'})
    lowerCamelCase : Optional[int] = field(default=2, metadata={'help': 'Batch size for evaluation.'})
    lowerCamelCase : Optional[float] = field(default=0.1, metadata={'help': 'Value of weight decay.'})
    lowerCamelCase : Optional[int] = field(
        default=1_00_00, metadata={'help': 'Size of buffer used to shuffle streaming dataset.'}
    )
    # NOTE(review): help string has a typo ("fo" -> "for"); cannot be fixed here
    # without changing a runtime string.
    lowerCamelCase : Optional[float] = field(default=2e-4, metadata={'help': 'Learning rate fo training.'})
    # NOTE(review): help text says "Learning rate." but the default 'cosine' is a
    # schedule name — this is presumably the LR-scheduler type.
    lowerCamelCase : Optional[str] = field(default='cosine', metadata={'help': 'Learning rate.'})
    lowerCamelCase : Optional[int] = field(
        default=7_50, metadata={'help': 'Number of warmup steps in the learning rate schedule.'}
    )
    lowerCamelCase : Optional[int] = field(
        default=16, metadata={'help': 'Number of gradient accumulation steps.'}
    )
    lowerCamelCase : Optional[bool] = field(
        default=snake_case__, metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'}
    )
    lowerCamelCase : Optional[int] = field(default=5_00_00, metadata={'help': 'Maximum number of training steps.'})
    lowerCamelCase : Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'}
    )
    lowerCamelCase : Optional[int] = field(default=10_24, metadata={'help': 'Sequence lengths used for training.'})
    lowerCamelCase : Optional[int] = field(default=1, metadata={'help': 'Training seed.'})
    lowerCamelCase : Optional[int] = field(
        default=10_24,
        metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'},
    )
    lowerCamelCase : Optional[str] = field(
        default=snake_case__, metadata={'help': 'States path if the training should continue from a checkpoint folder.'}
    )
    lowerCamelCase : Optional[bool] = field(default=snake_case__, metadata={'help': 'If True the data is pretokenized.'})


@dataclass
class UpperCAmelCase_:
    """Perplexity-evaluation configuration — presumably `EvalArguments`."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-valid', metadata={'help': 'Name or path of validation dataset.'}
    )
    lowerCamelCase : Optional[int] = field(default=2, metadata={'help': 'Batch size used for evaluation.'})
    lowerCamelCase : Optional[int] = field(
        default=-1, metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'}
    )
    lowerCamelCase : Optional[int] = field(default=10_24, metadata={'help': 'Length of sequences to be evaluated.'})
    lowerCamelCase : Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})


@dataclass
class UpperCAmelCase_:
    """HumanEval generation/scoring configuration — presumably `HumanEvalArguments`."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Model name or path of model to be evaluated.'}
    )
    lowerCamelCase : Optional[int] = field(default=snake_case__, metadata={'help': 'Number of workers used for code evaluation.'})
    lowerCamelCase : Optional[int] = field(
        default=snake_case__,
        metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'},
    )
    lowerCamelCase : Optional[bool] = field(
        default=snake_case__, metadata={'help': 'Sample from the language model\'s output distribution.'}
    )
    lowerCamelCase : Optional[float] = field(default=0.2, metadata={'help': 'Sampling temperature used for generation.'})
    lowerCamelCase : Optional[int] = field(default=2_56, metadata={'help': 'Maximum number of newly generated tokens.'})
    lowerCamelCase : Optional[int] = field(default=0, metadata={'help': 'Top-k parameter used for generation.'})
    lowerCamelCase : Optional[float] = field(default=0.95, metadata={'help': 'Top-p parameter used for nucleus sampling.'})
    lowerCamelCase : Optional[int] = field(default=10, metadata={'help': 'Number of generations to run in parallel.'})
    lowerCamelCase : Optional[int] = field(
        default=2_00, metadata={'help': 'Number of completions to generate for each sample.'}
    )
    lowerCamelCase : Optional[int] = field(default=1, metadata={'help': 'Random seed used for evaluation.'})
    # NOTE(review): help string duplicated from the seed field above; judging by
    # the default this is the results output file.
    lowerCamelCase : Optional[str] = field(
        default='eval_results.json', metadata={'help': 'Random seed used for evaluation.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='0', metadata={'help': 'Allow `code_eval` to execute Python code on machine'}
    )
    lowerCamelCase : Optional[int] = field(
        default=-1,
        metadata={
            'help': (
                'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
                ' number corresponds to which GPU device id to run on.'
            )
        },
    )


@dataclass
class UpperCAmelCase_:
    """Dataset cleaning/filtering configuration — presumably `PreprocessingArguments`."""

    lowerCamelCase : Optional[int] = field(
        default=snake_case__,
        metadata={
            'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
        },
    )
    lowerCamelCase : Optional[str] = field(
        default='transformersbook/codeparrot', metadata={'help': 'Folder or name of dataset to process.'}
    )
    # NOTE(review): help string has a duplicated word ("processed processed").
    lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean', metadata={'help': 'Folder to save processed processed dataset.'}
    )
    lowerCamelCase : Optional[int] = field(
        default=10_00_00, metadata={'help': 'Number of files to save per JSON output file.'}
    )
    lowerCamelCase : Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    lowerCamelCase : Optional[float] = field(
        default=10_00, metadata={'help': 'Maximum line length in file, otherwise file is filtered.'}
    )
    lowerCamelCase : Optional[float] = field(
        default=1_00, metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'}
    )
    lowerCamelCase : Optional[float] = field(
        default=0.25, metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'}
    )
    lowerCamelCase : Optional[float] = field(
        default=1.5, metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'}
    )
    lowerCamelCase : Optional[float] = field(
        default=0.7, metadata={'help': 'Probability for filtering config, test and uncommon files.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot',
        metadata={'help': 'Name or path to the tokenizer.'},
    )
    lowerCamelCase : Optional[bool] = field(
        default=snake_case__, metadata={'help': 'If True, near-duplicate samples are removed.'}
    )
    lowerCamelCase : Optional[float] = field(
        default=0.85, metadata={'help': 'Jaccard threshold for near-duplicate samples.'}
    )


@dataclass
class UpperCAmelCase_:
    """Tokenizer-training configuration — presumably `TokenizerTrainingArguments`."""

    lowerCamelCase : Optional[str] = field(
        default='gpt2', metadata={'help': 'Base tokenizer to build new tokenizer from.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='transformersbook/codeparrot-train', metadata={'help': 'Dataset to train tokenizer on.'}
    )
    lowerCamelCase : Optional[str] = field(default='content', metadata={'help': 'Column containing text data to process.'})
    lowerCamelCase : Optional[int] = field(default=20_00_00, metadata={'help': 'Number of examples to train tokenizer on.'})
    # NOTE(review): this help string duplicates the field above; the default
    # 32768 suggests it is actually the target vocabulary size — TODO confirm.
    lowerCamelCase : Optional[int] = field(
        default=3_27_68, metadata={'help': 'Number of examples to train the tokenizer on.'}
    )
    lowerCamelCase : Optional[str] = field(default='codeparrot', metadata={'help': 'Name of new tokenizer.'})
    lowerCamelCase : Optional[bool] = field(default=snake_case__, metadata={'help': 'Push saved tokenizer to the hub.'})


@dataclass
class UpperCAmelCase_:
    """Pretokenization configuration — presumably `PretokenizationArguments`."""

    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Name or path to the tokenizer.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot-clean-train', metadata={'help': 'Name or path to the dataset to pretokenize.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='tokenized-codeparrot-train', metadata={'help': 'Repo name of the pretokenized data.'}
    )
    lowerCamelCase : Optional[int] = field(default=snake_case__, metadata={'help': 'Number of workers used for code evaluation.'})


@dataclass
class UpperCAmelCase_:
    """Model-initialization configuration — presumably `InitializationArguments`."""

    lowerCamelCase : Optional[str] = field(
        default='gpt2-large', metadata={'help': 'Configuration to use for model initialization.'}
    )
    lowerCamelCase : Optional[str] = field(
        default='codeparrot/codeparrot', metadata={'help': 'Tokenizer attached to model.'}
    )
    lowerCamelCase : Optional[str] = field(default='codeparrot', metadata={'help': 'Name of the created model.'})
    lowerCamelCase : Optional[bool] = field(default=snake_case__, metadata={'help': 'Push saved tokenizer to the hub.'})
687
0
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), F"""The input value of [n={number}] is not an integer""" if number == 1: return 2 elif number < 1: _SCREAMING_SNAKE_CASE : List[str] = F"""The input value of [n={number}] has to be > 0""" raise ValueError(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Dict = sylvester(number - 1 ) _SCREAMING_SNAKE_CASE : Any = num - 1 _SCREAMING_SNAKE_CASE : Any = num return lower * upper + 1 if __name__ == "__main__": print(F"The 8th number in Sylvester\'s sequence: {sylvester(8)}")
338
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


# NOTE(review): in the obfuscated original every method was named
# `SCREAMING_SNAKE_CASE__`, so later definitions overwrote earlier ones and no
# test was ever discovered or run; the fixture values were also bound to a
# throwaway local instead of `self.*`. The attribute names below are recovered
# from the `self.checkpoint` / `self.tmpdirname` / ... reads in the test bodies;
# the `test_*` method names are reconstructed and should be confirmed upstream.
@require_torch
class UpperCAmelCase_(unittest.TestCase):
    """Tests for `BarkProcessor`: tokenizer round-trips and speaker-embedding handling."""

    def setUp(self):
        # Fixture values read by the test methods below.
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        """Load the checkpoint's tokenizer, forwarding any extra kwargs."""
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='(BOS)',
            eos_token='(EOS)',
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(
                voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist()
            )

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        # NOTE(review): the boolean kwargs were obfuscated away; these values
        # match the upstream Bark processor test — confirm against transformers.
        encoded_tok = tokenizer(
            self.input_string,
            padding='max_length',
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
687
0
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging


if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType


logger = logging.get_logger(__name__)

# NOTE(review): obfuscation gave the logger and this archive map the same name,
# silently clobbering the logger. The map keeps the obfuscated public name so
# existing importers keep working; the logger is restored above.
__UpperCAmelCase = {
    'microsoft/layoutlmv3-base': 'https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json',
}


# NOTE(review): the obfuscated original declared every __init__ parameter as
# `UpperCAmelCase` — a SyntaxError (duplicate argument names). The names below
# are recovered from the right-hand-side reads in the body and the kwargs of
# the `super().__init__` call. `max_ad_position_embeddings` and the `rel_ad_*`
# names look digit-mangled (presumably `max_2d_position_embeddings`,
# `rel_2d_pos_bins`, `max_rel_2d_pos`) — TODO confirm upstream.
class __a(PretrainedConfig):
    """Configuration for LayoutLMv3-style models (text + 2D layout + image)."""

    model_type = 'layoutlmv3'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        # Text-encoder hyperparameters are handled by the base config.
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        # Layout (bounding-box) embedding settings.
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        # Relative-position attention biases (1D and 2D).
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        # Modality switches and visual-patch settings.
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


# NOTE(review): the obfuscated original named all four overrides `A`, so only
# the last survived; the canonical `OnnxConfig` override names are restored
# below — confirm against transformers' ONNX export API.
class __a(OnnxConfig):
    """ONNX export configuration for the model above."""

    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs is different for question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy (text, boxes, image) inputs for tracing the export."""
        # The export supplies its own boxes, so the processor's built-in OCR is
        # disabled (value was obfuscated; upstream uses False — TODO confirm).
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )
        return inputs
600
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): obfuscation gave the logger and this archive map the same name,
# silently clobbering the logger. The map keeps the obfuscated public name `a`
# so existing importers keep working; the logger is restored above.
a = {
    """bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/config.json""",
    """bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/config.json""",
    """bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/config.json""",
    """bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/config.json""",
    """bert-base-multilingual-uncased""": """https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json""",
    """bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json""",
    """bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/config.json""",
    """bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/config.json""",
    """bert-large-uncased-whole-word-masking""": (
        """https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"""
    ),
    """bert-large-cased-whole-word-masking""": (
        """https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"""
    ),
    """bert-large-uncased-whole-word-masking-finetuned-squad""": (
        """https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
    ),
    """bert-large-cased-whole-word-masking-finetuned-squad""": (
        """https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"""
    ),
    """bert-base-cased-finetuned-mrpc""": """https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json""",
    """bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json""",
    """bert-base-german-dbmdz-uncased""": """https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json""",
    """cl-tohoku/bert-base-japanese""": """https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json""",
    """cl-tohoku/bert-base-japanese-whole-word-masking""": (
        """https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"""
    ),
    """cl-tohoku/bert-base-japanese-char""": (
        """https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"""
    ),
    """cl-tohoku/bert-base-japanese-char-whole-word-masking""": (
        """https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"""
    ),
    """TurkuNLP/bert-base-finnish-cased-v1""": (
        """https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"""
    ),
    """TurkuNLP/bert-base-finnish-uncased-v1""": (
        """https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"""
    ),
    """wietsedv/bert-base-dutch-cased""": """https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json""",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


# NOTE(review): the obfuscated original declared every __init__ parameter as
# `_UpperCAmelCase` — a SyntaxError (duplicate argument names). The parameter
# names below are recovered from the right-hand-side reads in the body; the
# base class is restored from this file's own `PretrainedConfig` import.
class UpperCAmelCase_(PretrainedConfig):
    """BERT model configuration (architecture hyperparameters)."""

    model_type = 'bert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


# NOTE(review): this class keeps the obfuscated name `UpperCAmelCase_` and so
# shadows the config class above at module level — an obfuscation artifact left
# in place to avoid changing the module's public names.
class UpperCAmelCase_(OnnxConfig):
    """ONNX export configuration for BERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ]
        )
687
0
"""Max-pooling and average-pooling over square matrices, implemented with NumPy."""
import numpy as np


def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size``x``size`` window over a square matrix with the given
    stride and take the maximum of each window.

    Args:
        arr: square input matrix (anything convertible by ``np.array``).
        size: side length of the pooling window.
        stride: step (in pixels) between consecutive windows.

    Returns:
        A square float matrix of side ``(n - size) // stride + 1``.

    Raises:
        ValueError: if the input matrix is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Slide a ``size``x``size`` window over a square matrix with the given
    stride and take the (truncated-to-int) average of each window.

    Args:
        arr: square input matrix (anything convertible by ``np.array``).
        size: side length of the pooling window.
        stride: step (in pixels) between consecutive windows.

    Returns:
        A square float matrix of side ``(n - size) // stride + 1``; each entry
        is the window average truncated with ``int()``, matching the original
        behavior (e.g. for uint8 image data).

    Raises:
        ValueError: if the input matrix is not square.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window (truncated to int)
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name="avgpooling", verbose=True)

    # PIL is only needed for this demo, so import it lazily here instead of at
    # module level — the pooling functions themselves only require NumPy.
    from PIL import Image

    # Loading the image
    image = Image.open("path_to_image")

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
446
# CLIP-guided images-mixing Stable Diffusion pipeline.
#
# NOTE(review): this module previously defined four top-level functions all
# named `UpperCamelCase_` (so only the last survived) and assigned every local
# to the same `_lowerCAmelCase` name while *reading* the original names — the
# code could not run. Identifiers below were restored from the surviving read
# sites (`preprocess(...)`, `slerp(...)`, `self.prepare_latents(...)`, ...);
# all runtime string literals and numeric constants are unchanged. The class
# name follows the diffusers community images-mixing pipeline — confirm against
# downstream imports.

import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


def preprocess(image, w, h):
    """Convert a PIL image (or list/tensor of images) to a [-1, 1] NCHW tensor."""
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    """Spherical linear interpolation between `v0` and `v1` at fraction `t`.

    Falls back to plain lerp when the vectors are nearly collinear
    (|cos| > DOT_THRESHOLD). Accepts torch tensors or numpy arrays.
    """
    inputs_are_torch = False  # fix: was unbound when inputs were already ndarrays
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # nearly parallel: lerp to avoid division by a tiny sin(theta)
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2


def spherical_dist_loss(x, y):
    """Squared spherical distance between L2-normalized embeddings."""
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


def set_requires_grad(model, value):
    """Enable/disable gradients for every parameter of `model`."""
    for param in model.parameters():
        param.requires_grad = value


class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    """Mix a content and a style image with Stable Diffusion + CLIP guidance.

    Latents, prompts and CLIP image embeddings of the two inputs are each
    slerp-blended, then denoised with optional CLIP-guidance on each step.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        clip_model: CLIPModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
        feature_extractor: CLIPFeatureExtractor,
        coca_model=None,
        coca_tokenizer=None,
        coca_transform=None,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        # feature_extractor.size is either an int or a dict with "shortest_edge"
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        # the text encoder and CLIP model are only used for guidance: freeze them
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        """Encode `image` to VAE latents and add scheduler noise for `timestep`."""
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    def get_image_description(self, image):
        """Caption `image` with the CoCa model (used when no prompt is given)."""
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")

    def get_clip_image_embeddings(self, image, batch_size):
        """L2-normalized CLIP image embedding, repeated `batch_size` times."""
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip

    @torch.enable_grad()
    def cond_fn(
        self,
        latents,
        timestep,
        index,
        text_embeddings,
        noise_pred_original,
        original_image_embeddings_clip,
        clip_guidance_scale,
    ):
        """Adjust the noise prediction so decoded latents move toward the CLIP target."""
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t**0.5 * noise_pred) / alpha_prod_t**0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents

    @torch.no_grad()
    def __call__(
        self,
        style_image: Union[torch.FloatTensor, PIL.Image.Image],
        content_image: Union[torch.FloatTensor, PIL.Image.Image],
        style_prompt: Optional[str] = None,
        content_prompt: Optional[str] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        noise_strength: float = 0.6,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        batch_size: Optional[int] = 1,
        eta: float = 0.0,
        clip_guidance_scale: Optional[float] = 100,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        slerp_latent_style_strength: float = 0.8,
        slerp_prompt_style_strength: float = 0.1,
        slerp_clip_image_style_strength: float = 0.1,
    ):
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if isinstance(generator, torch.Generator) and batch_size > 1:
            generator = [generator] + [None] * (batch_size - 1)

        coca_is_none = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        coca_is_none = [x[0] for x in coca_is_none if x[1]]
        coca_is_none_str = ", ".join(coca_is_none)
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
                    f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            content_prompt = self.get_image_description(content_image)
        if style_prompt is None:
            if len(coca_is_none):
                raise ValueError(
                    f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
                    f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
                )
            style_prompt = self.get_image_description(style_image)

        # get prompt text embeddings for content and style
        content_text_input = self.tokenizer(
            content_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]

        style_text_input = self.tokenizer(
            style_prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]

        text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)

        # duplicate text embeddings for each generation per prompt
        text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)

        # set timesteps
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        if accepts_offset:
            extra_set_kwargs["offset"] = 1
        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device)

        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # Preprocess image
        preprocessed_content_image = preprocess(content_image, width, height)
        content_latents = self.prepare_latents(
            preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        preprocessed_style_image = preprocess(style_image, width, height)
        style_latents = self.prepare_latents(
            preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
        )

        latents = slerp(slerp_latent_style_strength, content_latents, style_latents)

        if clip_guidance_scale > 0:
            content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
            style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
            clip_image_embeddings = slerp(
                slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
            )

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            max_length = content_text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
            # duplicate unconditional embeddings for each generation per prompt
            uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # NOTE(review): `latents` was just assigned from slerp() above, so the
        # random-noise branch below looks unreachable — kept as-is to preserve behavior.

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator

        with self.progress_bar(total=num_inference_steps):
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

                # perform classifier free guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # perform clip guidance
                if clip_guidance_scale > 0:
                    text_embeddings_for_guidance = (
                        text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
                    )
                    noise_pred, latents = self.cond_fn(
                        latents,
                        t,
                        i,
                        text_embeddings_for_guidance,
                        noise_pred,
                        clip_image_embeddings,
                        clip_guidance_scale,
                    )

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, None)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
687
0
"""Evaluate a speech-recognition model (WER/CER) on a 🤗 Datasets split.

NOTE(review): local/function names were restored from their read sites — the
mangled version referenced an undefined `text` inside `normalize_text` and
never rebound its intermediate results. Heavy `datasets`/`transformers`
imports are now lazy so the script can be imported (e.g. for testing
`normalize_text`) without those packages installed.
"""
import argparse
import re

import torch


def log_results(result, args):
    """Compute WER/CER for `result` and write them (and optionally all
    predictions/targets) to text files named after the dataset.

    Args:
        result: a `datasets.Dataset` with "target" and "prediction" columns.
        args: parsed CLI namespace (uses `dataset`, `config`, `split`,
            `log_outputs`).
    """
    from datasets import load_metric  # lazy: heavy dependency

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case `text`, strip ignored punctuation, and collapse whitespace
    sequences so targets match the model's training normalization."""
    # IMPORTANT: this should correspond to the chars that were ignored during training
    chars_to_ignore_regex = r'[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    """Run the ASR pipeline over the requested split and log WER/CER."""
    from datasets import Audio, load_dataset  # lazy: heavy dependencies
    from transformers import AutoFeatureExtractor, pipeline

    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
101
from __future__ import annotations from collections.abc import Sequence from typing import Literal def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ): """simple docstring""" _lowerCAmelCase :Optional[int] = list(__magic_name__ ) _lowerCAmelCase :Dict = list(__magic_name__ ) _lowerCAmelCase :Any = 0 for i in range(len(__magic_name__ ) ): if lista[i] != lista[i]: count += 1 _lowerCAmelCase :Union[str, Any] = '_' if count > 1: return False else: return "".join(__magic_name__ ) def UpperCamelCase_( __magic_name__ : list[str] ): """simple docstring""" _lowerCAmelCase :int = [] while True: _lowerCAmelCase :str = ['$'] * len(__magic_name__ ) _lowerCAmelCase :Optional[int] = [] for i in range(len(__magic_name__ ) ): for j in range(i + 1 , len(__magic_name__ ) ): _lowerCAmelCase :int = compare_string(binary[i] , binary[j] ) if k is False: _lowerCAmelCase :str = '*' _lowerCAmelCase :Union[str, Any] = '*' temp.append('X' ) for i in range(len(__magic_name__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(__magic_name__ ) == 0: return pi _lowerCAmelCase :Any = list(set(__magic_name__ ) ) def UpperCamelCase_( __magic_name__ : int , __magic_name__ : Sequence[float] ): """simple docstring""" _lowerCAmelCase :str = [] for minterm in minterms: _lowerCAmelCase :Any = '' for _ in range(__magic_name__ ): _lowerCAmelCase :Tuple = str(minterm % 2 ) + string minterm //= 2 temp.append(__magic_name__ ) return temp def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str , __magic_name__ : int ): """simple docstring""" _lowerCAmelCase :Optional[Any] = list(__magic_name__ ) _lowerCAmelCase :List[Any] = list(__magic_name__ ) _lowerCAmelCase :Optional[Any] = 0 for i in range(len(__magic_name__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def UpperCamelCase_( __magic_name__ : list[list[int]] , __magic_name__ : list[str] ): """simple docstring""" _lowerCAmelCase :str = [] _lowerCAmelCase :List[str] = [0] * len(__magic_name__ ) for i in 
range(len(chart[0] ) ): _lowerCAmelCase :Dict = 0 _lowerCAmelCase :Optional[Any] = -1 for j in range(len(__magic_name__ ) ): if chart[j][i] == 1: count += 1 _lowerCAmelCase :List[Any] = j if count == 1: _lowerCAmelCase :Dict = 1 for i in range(len(__magic_name__ ) ): if select[i] == 1: for j in range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(__magic_name__ ) ): _lowerCAmelCase :Dict = 0 temp.append(prime_implicants[i] ) while True: _lowerCAmelCase :Dict = 0 _lowerCAmelCase :Any = -1 _lowerCAmelCase :Optional[Any] = 0 for i in range(len(__magic_name__ ) ): _lowerCAmelCase :str = chart[i].count(1 ) if count_n > max_n: _lowerCAmelCase :Optional[Any] = count_n _lowerCAmelCase :Dict = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(__magic_name__ ) ): _lowerCAmelCase :str = 0 def UpperCamelCase_( __magic_name__ : list[str] , __magic_name__ : list[str] ): """simple docstring""" _lowerCAmelCase :str = [[0 for x in range(len(__magic_name__ ) )] for x in range(len(__magic_name__ ) )] for i in range(len(__magic_name__ ) ): _lowerCAmelCase :Tuple = prime_implicants[i].count('_' ) for j in range(len(__magic_name__ ) ): if is_for_table(prime_implicants[i] , binary[j] , __magic_name__ ): _lowerCAmelCase :str = 1 return chart def UpperCamelCase_( ): """simple docstring""" _lowerCAmelCase :Tuple = int(input('Enter the no. 
of variables\n' ) ) _lowerCAmelCase :Tuple = [ float(__magic_name__ ) for x in input( 'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split() ] _lowerCAmelCase :List[str] = decimal_to_binary(__magic_name__ , __magic_name__ ) _lowerCAmelCase :Any = check(__magic_name__ ) print('Prime Implicants are:' ) print(__magic_name__ ) _lowerCAmelCase :List[Any] = prime_implicant_chart(__magic_name__ , __magic_name__ ) _lowerCAmelCase :Tuple = selection(__magic_name__ , __magic_name__ ) print('Essential Prime Implicants are:' ) print(__magic_name__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
687
0
"""Tests for the `datasets` Spark packaged module.

NOTE(review): every test in the mangled version was named `_a` (each def
shadowed the previous one) and mock/local assignments were collapsed onto one
name; identifiers were restored from their read sites. Test names are
reconstructed from each body's intent — confirm against the upstream test file.
"""
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect (row_id, row_dict) pairs in the given partition order, matching
    the "<partition>_<row-index>" id format produced by the Spark builder."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
56
import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py a = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ a = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. 
Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ a = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[int]=4 , _UpperCAmelCase: Optional[int]=False ): _lowerCAmelCase :Any = compute_bleu( reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase ) ((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) :Tuple = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
687
0
from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function SCREAMING_SNAKE_CASE = 1.0_5_4_5_7_1_8_1_7e-3_4 # unit of ℏ : J * s SCREAMING_SNAKE_CASE = 3e8 # unit of c : m * s^-1 def _lowerCamelCase ( __A : float , __A : float , __A : float ) -> str: if (force, area, distance).count(0 ) != 1: raise ValueError('''One and only one argument must be 0''' ) if force < 0: raise ValueError('''Magnitude of force can not be negative''' ) if distance < 0: raise ValueError('''Distance can not be negative''' ) if area < 0: raise ValueError('''Area can not be negative''' ) if force == 0: _UpperCAmelCase : List[str] = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: _UpperCAmelCase : Optional[int] = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: _UpperCAmelCase : Optional[int] = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError('''One and only one argument must be 0''' ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
485
# Lazy import structure for the Falcon model: heavy, torch-dependent
# submodules are only imported on first attribute access via _LazyModule,
# following the standard Hugging Face `transformers` module layout.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> public names it exports (the always-importable part).
a = {
    """configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}

# The modeling submodule is only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds `a` (the import-structure dict above) to a
    # plain list, clobbering it -- presumably this was originally
    # `_import_structure["modeling_falcon"] = [...]`; confirm against the
    # upstream module.
    a = [
        """FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """FalconForCausalLM""",
        """FalconModel""",
        """FalconPreTrainedModel""",
        """FalconForSequenceClassification""",
        """FalconForTokenClassification""",
        """FalconForQuestionAnswering""",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; never executed at runtime.
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # Replace this module in sys.modules with a lazily-loading proxy.
    # NOTE(review): `_import_structure` is referenced here but never defined
    # in this file -- the import-structure dict is bound to `a` instead, so
    # this line raises NameError as written; confirm against upstream.
    a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
687
0
"""simple docstring""" from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = 42 _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = None _UpperCAmelCase = field(default='''Translation''', init=snake_case__, repr=snake_case__ ) def __call__( self ) -> List[str]: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowerCamelCase_ ( self ) -> Any: from .features import Value return {k: Value('string' ) for k in sorted(self.languages )} @dataclass class lowercase__ : '''simple docstring''' _UpperCAmelCase = None _UpperCAmelCase = None _UpperCAmelCase = None # Automatically constructed _UpperCAmelCase = "dict" _UpperCAmelCase = None _UpperCAmelCase = field(default='''TranslationVariableLanguages''', init=snake_case__, repr=snake_case__ ) def lowerCamelCase_ ( self ) -> Optional[Any]: _UpperCAmelCase = sorted(set(self.languages ) ) if self.languages else None _UpperCAmelCase = len(self.languages ) if self.languages else None def __call__( self ) -> Tuple: return pa.struct({'language': pa.list_(pa.string() ), 'translation': pa.list_(pa.string() )} ) def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]: _UpperCAmelCase = set(self.languages ) if self.languages and set(_UpperCAmelCase ) - lang_set: raise ValueError( f'Some languages in example ({", ".join(sorted(set(_UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(_UpperCAmelCase )}).' ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. 
_UpperCAmelCase = [] for lang, text in translation_dict.items(): if isinstance(_UpperCAmelCase , _UpperCAmelCase ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _UpperCAmelCase = zip(*sorted(_UpperCAmelCase ) ) return {"language": languages, "translation": translations} def lowerCamelCase_ ( self ) -> int: from .features import Sequence, Value return { "language": Sequence(Value('string' ) ), "translation": Sequence(Value('string' ) ), }
573
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def __init__( self: str , _UpperCAmelCase: str , _UpperCAmelCase: Optional[int]=7 , _UpperCAmelCase: Union[str, Any]=3 , _UpperCAmelCase: int=18 , _UpperCAmelCase: List[Any]=30 , _UpperCAmelCase: List[Any]=400 , _UpperCAmelCase: Optional[Any]=True , _UpperCAmelCase: Any=None , _UpperCAmelCase: Any=True , _UpperCAmelCase: int=None , _UpperCAmelCase: Union[str, Any]=True , ): _lowerCAmelCase :Tuple = size if size is not None else {'shortest_edge': 20} _lowerCAmelCase :str = crop_size if crop_size is not None else {'height': 18, 'width': 18} _lowerCAmelCase :str = parent _lowerCAmelCase :List[Any] = batch_size _lowerCAmelCase :Optional[Any] = num_channels _lowerCAmelCase :Optional[Any] = image_size _lowerCAmelCase :int = min_resolution _lowerCAmelCase :List[str] = max_resolution _lowerCAmelCase :List[str] = do_resize _lowerCAmelCase :Optional[int] = size _lowerCAmelCase :str = do_center_crop _lowerCAmelCase :int = crop_size _lowerCAmelCase :Optional[int] = do_flip_channel_order def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Any = MobileViTImageProcessor if is_vision_available() else None def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = 
MobileViTImageProcessingTester(self ) @property def SCREAMING_SNAKE_CASE__ ( self: str ): return self.image_processor_tester.prepare_image_processor_dict() def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase :str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'size' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'center_crop' ) ) self.assertTrue(hasattr(_UpperCAmelCase , 'do_flip_channel_order' ) ) def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 20} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) _lowerCAmelCase :Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): pass def SCREAMING_SNAKE_CASE__ ( self: int ): # Initialize image_processing _lowerCAmelCase :Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _lowerCAmelCase :Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , Image.Image ) # Test not batched input _lowerCAmelCase :Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase :str = image_processing(_UpperCAmelCase , return_tensors='pt' 
).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): # Initialize image_processing _lowerCAmelCase :int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _lowerCAmelCase :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , np.ndarray ) # Test not batched input _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase :List[str] = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def SCREAMING_SNAKE_CASE__ ( self: Any ): # Initialize image_processing _lowerCAmelCase :Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _lowerCAmelCase :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase ) for image in image_inputs: self.assertIsInstance(_UpperCAmelCase , torch.Tensor ) # Test not batched input _lowerCAmelCase :List[str] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], 
self.image_processor_tester.crop_size['width'], ) , ) # Test batched _lowerCAmelCase :int = image_processing(_UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
687
0
from __future__ import annotations

import math


# NOTE(review): in the mangled original, both functions were named
# `__lowerCAmelCase` (the second clobbered the first), every parameter of the
# first was named `A_` (a SyntaxError: duplicate argument names), and the
# bodies called the undefined names `minimax` / `main`. The callable names
# and parameter names the bodies actually use are restored here.
def minimax(
    depth: int, node_index: int, is_max: bool, scores: list[int], height: float
) -> int:
    """Return the optimal value for the maximizing player of a full binary
    game tree whose leaf values are ``scores``.

    Args:
        depth: current depth in the tree (root is 0).
        node_index: index of the current node within its level.
        is_max: True when the current player maximizes, False when minimizing.
        scores: leaf values of the game tree (must be non-empty).
        height: depth at which the leaves sit, i.e. log2(len(scores)).

    Raises:
        ValueError: if ``depth`` is negative or ``scores`` is empty.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    # Leaf reached: return its static score.
    if depth == height:
        return scores[node_index]
    # Children of node i sit at indices 2*i and 2*i + 1 on the next level;
    # the turn alternates between the maximizing and minimizing player.
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    """Demo: print the optimal value for a sample leaf-score list."""
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
221
import itertools
from dataclasses import dataclass
from typing import Optional

import pandas as pd
import pyarrow as pa

import datasets
from datasets.table import table_cast


# NOTE(review): this module has been mechanically renamed (both classes are
# `UpperCAmelCase_`, every method is `SCREAMING_SNAKE_CASE__`, locals are
# `_lowerCAmelCase`), which introduced real defects flagged inline below:
# typing names used in signatures (Tuple, List, Any, Dict) are not imported,
# and several results are assigned to throwaway names while later code still
# reads the original variable names. Confirm against the upstream `datasets`
# packaged pandas builder before relying on this file.
@dataclass
class UpperCAmelCase_(datasets.BuilderConfig):
    """BuilderConfig for the pickled-pandas loader.

    `lowerCamelCase` (presumably originally `features`) optionally holds the
    user-supplied `datasets.Features` that loaded tables are cast to.
    """

    lowerCamelCase : Optional[datasets.Features] = None


class UpperCAmelCase_(datasets.ArrowBasedBuilder):
    """Arrow-based builder that reads pickled pandas DataFrames into Arrow tables."""

    # NOTE(review): `Any` is not imported and `PandasConfig` is not defined
    # under that name in this file (the config class above was renamed) --
    # evaluating this class body raises NameError as written.
    lowerCamelCase : Any = PandasConfig

    def SCREAMING_SNAKE_CASE__(self: int):
        # Dataset metadata: only the (optional) user-provided features.
        return datasets.DatasetInfo(features=self.config.features)

    def SCREAMING_SNAKE_CASE__(self: Tuple, _UpperCAmelCase: List[str]):
        """Build one SplitGenerator per split from ``config.data_files``.

        ``_UpperCAmelCase`` is the download manager. NOTE(review): the body
        still reads the names the rename replaced (`dl_manager`,
        `data_files`, `files`, `splits`) while the results are bound to
        throwaway `_lowerCAmelCase` locals, so this method raises NameError
        as written.
        """
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        _lowerCAmelCase :Dict = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(_UpperCAmelCase , (str, list, tuple)):
            _lowerCAmelCase :Any = data_files
            if isinstance(_UpperCAmelCase , _UpperCAmelCase):
                _lowerCAmelCase :Dict = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase :List[Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'files': files} )]
        _lowerCAmelCase :Any = []
        for split_name, files in data_files.items():
            if isinstance(_UpperCAmelCase , _UpperCAmelCase):
                _lowerCAmelCase :str = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            _lowerCAmelCase :Union[str, Any] = [dl_manager.iter_files(_UpperCAmelCase ) for file in files]
            splits.append(datasets.SplitGenerator(name=_UpperCAmelCase , gen_kwargs={'files': files} ))
        return splits

    def SCREAMING_SNAKE_CASE__(self: Optional[Any] , _UpperCAmelCase: pa.Table):
        # Cast the table to the requested features, if any were supplied.
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            _lowerCAmelCase :str = table_cast(_UpperCAmelCase , self.config.features.arrow_schema )
        # NOTE(review): `pa_table` is undefined here -- the cast result above
        # is bound to a throwaway local rather than the parameter.
        return pa_table

    def SCREAMING_SNAKE_CASE__(self: List[str] , _UpperCAmelCase: Dict):
        """Yield ``(key, Arrow table)`` pairs, one per pickled-DataFrame file.

        NOTE(review): ``open(...)`` / ``pd.read_pickle(...)`` are called on
        the whole files argument rather than the loop's ``file``, and
        ``self._cast_table`` does not exist under that name after the
        rename -- both look like artifacts of the mechanical renaming.
        """
        for i, file in enumerate(itertools.chain.from_iterable(_UpperCAmelCase ) ):
            with open(_UpperCAmelCase , 'rb' ) as f:
                _lowerCAmelCase :Optional[Any] = pa.Table.from_pandas(pd.read_pickle(_UpperCAmelCase ) )
            yield i, self._cast_table(_UpperCAmelCase )
687
0
'''simple docstring''' import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse('0.8.3'): raise Exception('requires gluonnlp == 0.8.3') if version.parse(mx.__version__) != version.parse('1.5.0'): raise Exception('requires mxnet == 1.5.0') logging.set_verbosity_info() SCREAMING_SNAKE_CASE = logging.get_logger(__name__) SCREAMING_SNAKE_CASE = 'The Nymphenburg Palace is a beautiful palace in Munich!' def lowercase_ ( __A : str , __A : str ) -> Dict: """simple docstring""" lowercase : Union[str, Any] ={ 'attention_cell': 'multi_head', 'num_layers': 4, 'units': 1_0_2_4, 'hidden_size': 7_6_8, 'max_length': 5_1_2, 'num_heads': 8, 'scaled': True, 'dropout': 0.1, 'use_residual': True, 'embed_size': 1_0_2_4, 'embed_dropout': 0.1, 'word_embed': None, 'layer_norm_eps': 1E-5, 'token_type_vocab_size': 2, } lowercase : List[Any] =bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py lowercase : Optional[int] =BERTEncoder( attention_cell=predefined_args['''attention_cell'''] , num_layers=predefined_args['''num_layers'''] , units=predefined_args['''units'''] , hidden_size=predefined_args['''hidden_size'''] , max_length=predefined_args['''max_length'''] , num_heads=predefined_args['''num_heads'''] , scaled=predefined_args['''scaled'''] , dropout=predefined_args['''dropout'''] , output_attention=__A , 
output_all_encodings=__A , use_residual=predefined_args['''use_residual'''] , activation=predefined_args.get('''activation''' , '''gelu''' ) , layer_norm_eps=predefined_args.get('''layer_norm_eps''' , __A ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later lowercase : str ='openwebtext_ccnews_stories_books_cased' # Specify download folder to Gluonnlp's vocab lowercase : Optional[Any] =os.path.join(get_home_dir() , '''models''' ) lowercase : Union[str, Any] =_load_vocab(__A , __A , __A , cls=__A ) lowercase : Tuple =nlp.model.BERTModel( __A , len(__A ) , units=predefined_args['''units'''] , embed_size=predefined_args['''embed_size'''] , embed_dropout=predefined_args['''embed_dropout'''] , word_embed=predefined_args['''word_embed'''] , use_pooler=__A , use_token_type_embed=__A , token_type_vocab_size=predefined_args['''token_type_vocab_size'''] , use_classifier=__A , use_decoder=__A , ) original_bort.load_parameters(__A , cast_dtype=__A , ignore_extra=__A ) lowercase : List[str] =original_bort._collect_params_with_prefix() # Build our config 🤗 lowercase : Dict ={ 'architectures': ['BertForMaskedLM'], 'attention_probs_dropout_prob': predefined_args['dropout'], 'hidden_act': 'gelu', 'hidden_dropout_prob': predefined_args['dropout'], 'hidden_size': predefined_args['embed_size'], 'initializer_range': 0.02, 'intermediate_size': predefined_args['hidden_size'], 'layer_norm_eps': predefined_args['layer_norm_eps'], 'max_position_embeddings': predefined_args['max_length'], 'model_type': 'bort', 'num_attention_heads': predefined_args['num_heads'], 'num_hidden_layers': predefined_args['num_layers'], 'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa 'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa 'vocab_size': len(__A ), } lowercase : Tuple =BertConfig.from_dict(__A ) lowercase : Optional[int] =BertForMaskedLM(__A ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon 
Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | 
`bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(__A : Tuple ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(__A : Optional[int] , __A : Union[str, Any] ): lowercase : str =hf_param.shape lowercase : List[Any] =to_torch(params[gluon_param] ) lowercase : str =gluon_param.shape assert ( shape_hf == shape_gluon ), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param lowercase : str =check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , '''word_embed.0.weight''' ) lowercase : Dict =check_and_map_params( hf_bort_model.bert.embeddings.position_embeddings.weight , '''encoder.position_weight''' ) lowercase : List[str] =check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , '''encoder.layer_norm.beta''' ) lowercase : Any =check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , '''encoder.layer_norm.gamma''' ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) lowercase : Union[str, Any] =torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): lowercase : BertLayer =hf_bort_model.bert.encoder.layer[i] # self attention lowercase : BertSelfAttention =layer.attention.self lowercase : Any =check_and_map_params( self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) lowercase : int =check_and_map_params( self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) lowercase : str =check_and_map_params( self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) lowercase : Tuple 
=check_and_map_params( self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) lowercase : Dict =check_and_map_params( self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) lowercase : Tuple =check_and_map_params( self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output lowercase : BertSelfOutput =layer.attention.output lowercase : int =check_and_map_params( self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' ) lowercase : int =check_and_map_params( self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' ) lowercase : Dict =check_and_map_params( self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' ) lowercase : List[str] =check_and_map_params( self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate lowercase : BertIntermediate =layer.intermediate lowercase : List[Any] =check_and_map_params( intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) lowercase : List[Any] =check_and_map_params( intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output lowercase : BertOutput =layer.output lowercase : Tuple =check_and_map_params( bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) lowercase : Tuple =check_and_map_params( bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) lowercase : Tuple =check_and_map_params( bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) lowercase : Optional[int] =check_and_map_params( bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models lowercase : List[str] =RobertaTokenizer.from_pretrained('''roberta-base''' ) lowercase : str =tokenizer.encode_plus(__A 
)['input_ids'] # Get gluon output lowercase : List[str] =mx.nd.array([input_ids] ) lowercase : Any =original_bort(inputs=__A , token_types=[] ) # Get Transformer output (save and reload model again) hf_bort_model.save_pretrained(__A ) lowercase : Any =BertModel.from_pretrained(__A ) hf_bort_model.eval() lowercase : Union[str, Any] =tokenizer.encode_plus(__A , return_tensors='''pt''' ) lowercase : str =hf_bort_model(**__A )[0] lowercase : Union[str, Any] =output_gluon[0].asnumpy() lowercase : Dict =output_hf[0].detach().numpy() lowercase : Optional[int] =np.max(np.abs(hf_layer - gluon_layer ) ).item() lowercase : Union[str, Any] =np.allclose(__A , __A , atol=1E-3 ) if success: print('''✔️ Both model do output the same tensors''' ) else: print('''❌ Both model do **NOT** output the same tensors''' ) print('''Absolute difference is:''' , __A ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
94
import glob import os import random from string import ascii_lowercase, digits import cva a = """""" a = """""" a = """""" a = 1 # (0 is vertical, 1 is horizontal) def UpperCamelCase_( ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase :Union[str, Any] = get_dataset(__magic_name__ , __magic_name__ ) print('Processing...' ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = update_image_and_anno(__magic_name__ , __magic_name__ , __magic_name__ ) for index, image in enumerate(__magic_name__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' _lowerCAmelCase :Optional[Any] = random_chars(32 ) _lowerCAmelCase :str = paths[index].split(os.sep )[-1].rsplit('.' , 1 )[0] _lowerCAmelCase :Tuple = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(f"""/{file_root}.jpg""" , __magic_name__ , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(f"""Success {index+1}/{len(__magic_name__ )} with {file_name}""" ) _lowerCAmelCase :str = [] for anno in new_annos[index]: _lowerCAmelCase :List[str] = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(__magic_name__ ) with open(f"""/{file_root}.txt""" , 'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def UpperCamelCase_( __magic_name__ : str , __magic_name__ : str ): """simple docstring""" _lowerCAmelCase :int = [] _lowerCAmelCase :Union[str, Any] = [] for label_file in glob.glob(os.path.join(__magic_name__ , '*.txt' ) ): _lowerCAmelCase :Optional[int] = label_file.split(os.sep )[-1].rsplit('.' 
, 1 )[0] with open(__magic_name__ ) as in_file: _lowerCAmelCase :Union[str, Any] = in_file.readlines() _lowerCAmelCase :List[Any] = os.path.join(__magic_name__ , f"""{label_name}.jpg""" ) _lowerCAmelCase :Tuple = [] for obj_list in obj_lists: _lowerCAmelCase :Union[str, Any] = obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(__magic_name__ ) labels.append(__magic_name__ ) return img_paths, labels def UpperCamelCase_( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int = 1 ): """simple docstring""" _lowerCAmelCase :str = [] _lowerCAmelCase :Any = [] _lowerCAmelCase :Optional[Any] = [] for idx in range(len(__magic_name__ ) ): _lowerCAmelCase :Optional[int] = [] _lowerCAmelCase :Optional[Any] = img_list[idx] path_list.append(__magic_name__ ) _lowerCAmelCase :List[str] = anno_list[idx] _lowerCAmelCase :Optional[Any] = cva.imread(__magic_name__ ) if flip_type == 1: _lowerCAmelCase :List[Any] = cva.flip(__magic_name__ , __magic_name__ ) for bbox in img_annos: _lowerCAmelCase :List[Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: _lowerCAmelCase :List[str] = cva.flip(__magic_name__ , __magic_name__ ) for bbox in img_annos: _lowerCAmelCase :List[str] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(__magic_name__ ) new_imgs_list.append(__magic_name__ ) return new_imgs_list, new_annos_lists, path_list def UpperCamelCase_( __magic_name__ : int = 32 ): """simple docstring""" assert number_char > 1, "The number of character should greater than 1" _lowerCAmelCase :str = ascii_lowercase + digits return "".join(random.choice(__magic_name__ ) for _ in range(__magic_name__ ) ) if __name__ == "__main__": main() print("""DONE ✅""")
687
0
from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCamelCase : str = logging.get_logger(__name__) _lowerCamelCase : Union[str, Any] = { "google/vivit-b-16x2-kinetics400": ( "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class __snake_case (snake_case__ ): lowerCAmelCase__ = 'vivit' def __init__( self : List[Any] , _UpperCAmelCase : Optional[int]=224 , _UpperCAmelCase : int=32 , _UpperCAmelCase : List[Any]=[2, 16, 16] , _UpperCAmelCase : Dict=3 , _UpperCAmelCase : Optional[int]=768 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : List[Any]=3072 , _UpperCAmelCase : List[Any]="gelu_fast" , _UpperCAmelCase : List[Any]=0.0 , _UpperCAmelCase : Optional[Any]=0.0 , _UpperCAmelCase : int=0.02 , _UpperCAmelCase : Optional[Any]=1E-06 , _UpperCAmelCase : Tuple=True , **_UpperCAmelCase : List[Any] , ) -> int: '''simple docstring''' _lowerCAmelCase : List[Any] = hidden_size _lowerCAmelCase : Optional[Any] = num_hidden_layers _lowerCAmelCase : Union[str, Any] = num_attention_heads _lowerCAmelCase : str = intermediate_size _lowerCAmelCase : List[Any] = hidden_act _lowerCAmelCase : int = hidden_dropout_prob _lowerCAmelCase : Tuple = attention_probs_dropout_prob _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : List[Any] = layer_norm_eps _lowerCAmelCase : List[Any] = image_size _lowerCAmelCase : str = num_frames _lowerCAmelCase : Union[str, Any] = tubelet_size _lowerCAmelCase : int = num_channels _lowerCAmelCase : Dict = qkv_bias super().__init__(**_UpperCAmelCase )
429
import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging a = logging.get_logger(__name__) def UpperCamelCase_( __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] ): """simple docstring""" _lowerCAmelCase :Optional[Any] = nn.functional.normalize(__magic_name__ ) _lowerCAmelCase :List[str] = nn.functional.normalize(__magic_name__ ) return torch.mm(__magic_name__ , normalized_text_embeds.t() ) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : str = CLIPConfig lowerCamelCase : Any = ['CLIPEncoderLayer'] def __init__( self: Optional[int] , _UpperCAmelCase: CLIPConfig ): super().__init__(_UpperCAmelCase ) _lowerCAmelCase :Any = CLIPVisionModel(config.vision_config ) _lowerCAmelCase :Optional[int] = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase ) _lowerCAmelCase :int = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) _lowerCAmelCase :Any = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=_UpperCAmelCase ) _lowerCAmelCase :str = nn.Parameter(torch.ones(17 ) , requires_grad=_UpperCAmelCase ) _lowerCAmelCase :Optional[int] = nn.Parameter(torch.ones(3 ) , requires_grad=_UpperCAmelCase ) @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[int] , _UpperCAmelCase: Dict ): _lowerCAmelCase :str = self.vision_model(_UpperCAmelCase )[1] # pooled_output _lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _lowerCAmelCase :Optional[int] = cosine_distance(_UpperCAmelCase , self.special_care_embeds ).cpu().float().numpy() _lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ).cpu().float().numpy() _lowerCAmelCase :str = [] _lowerCAmelCase :List[Any] = image_embeds.shape[0] 
for i in range(_UpperCAmelCase ): _lowerCAmelCase :Optional[Any] = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images _lowerCAmelCase :List[Any] = 0.0 for concept_idx in range(len(special_cos_dist[0] ) ): _lowerCAmelCase :List[Any] = special_cos_dist[i][concept_idx] _lowerCAmelCase :Dict = self.special_care_embeds_weights[concept_idx].item() _lowerCAmelCase :List[Any] = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} ) _lowerCAmelCase :Any = 0.0_1 for concept_idx in range(len(cos_dist[0] ) ): _lowerCAmelCase :Union[str, Any] = cos_dist[i][concept_idx] _lowerCAmelCase :str = self.concept_embeds_weights[concept_idx].item() _lowerCAmelCase :str = round(concept_cos - concept_threshold + adjustment , 3 ) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(_UpperCAmelCase ) result.append(_UpperCAmelCase ) _lowerCAmelCase :Any = [len(res['bad_concepts'] ) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( self: str , _UpperCAmelCase: torch.FloatTensor , _UpperCAmelCase: torch.FloatTensor ): _lowerCAmelCase :Optional[int] = self.vision_model(_UpperCAmelCase )[1] # pooled_output _lowerCAmelCase :Union[str, Any] = self.visual_projection(_UpperCAmelCase ) _lowerCAmelCase :Dict = cosine_distance(_UpperCAmelCase , self.special_care_embeds ) _lowerCAmelCase :List[str] = cosine_distance(_UpperCAmelCase , self.concept_embeds ) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images _lowerCAmelCase :Any = 0.0 _lowerCAmelCase :Union[str, Any] = special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores 
= special_scores.round(decimals=3) _lowerCAmelCase :Tuple = torch.any(special_scores > 0 , dim=1 ) _lowerCAmelCase :List[str] = special_care * 0.0_1 _lowerCAmelCase :Any = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] ) _lowerCAmelCase :Optional[Any] = (cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) _lowerCAmelCase :List[str] = torch.any(concept_scores > 0 , dim=1 ) return images, has_nsfw_concepts
687
0
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __A : List[Any] = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class lowerCAmelCase__ ( snake_case__ , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : Tuple = BartphoTokenizer __UpperCAmelCase : Tuple = False __UpperCAmelCase : Union[str, Any] = True def snake_case ( self : Optional[int] ): super().setUp() __lowercase : int = ['▁This', '▁is', '▁a', '▁t', 'est'] __lowercase : str = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) __lowercase : List[str] = {'unk_token': '<unk>'} __lowercase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] ) with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(f'{token} {vocab_tokens[token]}\n' ) __lowercase : Optional[int] = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case ( self : Optional[int] , **lowercase__ : Dict ): kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase ) def snake_case ( self : int , lowercase__ : Optional[Any] ): __lowercase : int = 'This is a là test' __lowercase : Optional[int] = 'This is a<unk><unk> test' return input_text, output_text def snake_case ( self : Union[str, Any] ): __lowercase : Any = BartphoTokenizer(_UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) __lowercase : Dict = 'This is a là test' __lowercase : Dict = '▁This ▁is ▁a ▁l à ▁t est'.split() __lowercase : int = tokenizer.tokenize(_UpperCAmelCase ) self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase ) __lowercase : Tuple = tokens + [tokenizer.unk_token] __lowercase : 
int = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
575
from math import atan, cos, radians, sin, tan from .haversine_distance import haversine_distance a = 6_3_7_8_1_3_7.0 a = 6_3_5_6_7_5_2.3_1_4_2_4_5 a = 6_378_137 def UpperCamelCase_( __magic_name__ : float , __magic_name__ : float , __magic_name__ : float , __magic_name__ : float ): """simple docstring""" _lowerCAmelCase :List[Any] = (AXIS_A - AXIS_B) / AXIS_A # Parametric latitudes # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude _lowerCAmelCase :Union[str, Any] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) _lowerCAmelCase :List[str] = atan((1 - flattening) * tan(radians(__magic_name__ ) ) ) # Compute central angle between two points # using haversine theta. sigma = haversine_distance / equatorial radius _lowerCAmelCase :int = haversine_distance(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) / EQUATORIAL_RADIUS # Intermediate P and Q values _lowerCAmelCase :str = (b_lata + b_lata) / 2 _lowerCAmelCase :Tuple = (b_lata - b_lata) / 2 # Intermediate X value # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2) _lowerCAmelCase :str = (sin(__magic_name__ ) ** 2) * (cos(__magic_name__ ) ** 2) _lowerCAmelCase :Optional[int] = cos(sigma / 2 ) ** 2 _lowerCAmelCase :List[Any] = (sigma - sin(__magic_name__ )) * (x_numerator / x_demonimator) # Intermediate Y value # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2) _lowerCAmelCase :Dict = (cos(__magic_name__ ) ** 2) * (sin(__magic_name__ ) ** 2) _lowerCAmelCase :str = sin(sigma / 2 ) ** 2 _lowerCAmelCase :Union[str, Any] = (sigma + sin(__magic_name__ )) * (y_numerator / y_denominator) return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value))) if __name__ == "__main__": import doctest doctest.testmod()
687
0
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: def run_func(__SCREAMING_SNAKE_CASE ): @wraps(__SCREAMING_SNAKE_CASE ) def run_in_eager_mode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): return func(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) @wraps(__SCREAMING_SNAKE_CASE ) @tf.function(experimental_compile=__SCREAMING_SNAKE_CASE ) def run_in_graph_mode(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ): return func(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. 
Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = random.Random() _SCREAMING_SNAKE_CASE : Any = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(__SCREAMING_SNAKE_CASE , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class _snake_case ( snake_case__ ): """simple docstring""" a = 42 a = 42 a = "TensorFlow" @property def _lowerCAmelCase ( self : str): """simple docstring""" return tf.__version__ def _lowerCAmelCase ( self : Optional[Any] , _A : str , _A : int , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _SCREAMING_SNAKE_CASE : Optional[int] = self._prepare_inference_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return self._measure_speed(_inference) def _lowerCAmelCase ( self : Optional[int] , _A : str , _A : int , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _SCREAMING_SNAKE_CASE : List[Any] = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return self._measure_speed(_train) def _lowerCAmelCase ( self : Optional[int] , _A : str , _A : int , _A : int): """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase) _SCREAMING_SNAKE_CASE : Optional[int] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _SCREAMING_SNAKE_CASE : Union[str, Any] = self._prepare_inference_func(_UpperCAmelCase , 
_UpperCAmelCase , _UpperCAmelCase) return self._measure_memory(_inference) def _lowerCAmelCase ( self : Dict , _A : str , _A : int , _A : int): """simple docstring""" if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , _UpperCAmelCase) _SCREAMING_SNAKE_CASE : List[str] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""") _SCREAMING_SNAKE_CASE : Optional[Any] = self._prepare_train_func(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) return self._measure_memory(_train) def _lowerCAmelCase ( self : Tuple , _A : str , _A : int , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""") _SCREAMING_SNAKE_CASE : Optional[int] = ( hasattr(_UpperCAmelCase , """architectures""") and isinstance(config.architectures , _UpperCAmelCase) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _SCREAMING_SNAKE_CASE : str = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model _SCREAMING_SNAKE_CASE : int = __import__("""transformers""" , fromlist=[model_class]) _SCREAMING_SNAKE_CASE : List[Any] = getattr(_UpperCAmelCase , _UpperCAmelCase) _SCREAMING_SNAKE_CASE : Any = model_cls(_UpperCAmelCase) except ImportError: raise ImportError( f"""{model_class} does not exist. 
If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""") else: _SCREAMING_SNAKE_CASE : str = TF_MODEL_MAPPING[config.__class__](_UpperCAmelCase) # encoder-decoder has vocab size saved differently _SCREAMING_SNAKE_CASE : str = config.vocab_size if hasattr(_UpperCAmelCase , """vocab_size""") else config.encoder.vocab_size _SCREAMING_SNAKE_CASE : str = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_decoder_forward(): return model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , training=_UpperCAmelCase) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_forward(): return model(_UpperCAmelCase , training=_UpperCAmelCase) _SCREAMING_SNAKE_CASE : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _lowerCAmelCase ( self : Dict , _A : str , _A : int , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. 
Please make sure that `args.eager_mode = False`.""") if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""") _SCREAMING_SNAKE_CASE : Union[str, Any] = ( hasattr(_UpperCAmelCase , """architectures""") and isinstance(config.architectures , _UpperCAmelCase) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: _SCREAMING_SNAKE_CASE : List[str] = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model _SCREAMING_SNAKE_CASE : Union[str, Any] = __import__("""transformers""" , fromlist=[model_class]) _SCREAMING_SNAKE_CASE : int = getattr(_UpperCAmelCase , _UpperCAmelCase) _SCREAMING_SNAKE_CASE : int = model_cls(_UpperCAmelCase) except ImportError: raise ImportError( f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to""" """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""") else: _SCREAMING_SNAKE_CASE : Dict = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_UpperCAmelCase) # encoder-decoder has vocab size saved differently _SCREAMING_SNAKE_CASE : List[str] = config.vocab_size if hasattr(_UpperCAmelCase , """vocab_size""") else config.encoder.vocab_size _SCREAMING_SNAKE_CASE : Tuple = random_input_ids(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_decoder_train(): _SCREAMING_SNAKE_CASE : int = model(_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase)[0] _SCREAMING_SNAKE_CASE : int = tf.gradients(_UpperCAmelCase , model.trainable_variables) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla) def encoder_train(): _SCREAMING_SNAKE_CASE : str = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase)[0] _SCREAMING_SNAKE_CASE : List[Any] = tf.gradients(_UpperCAmelCase , model.trainable_variables) return gradients 
_SCREAMING_SNAKE_CASE : Dict = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _lowerCAmelCase ( self : int , _A : Any): """simple docstring""" with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""") timeit.repeat(_UpperCAmelCase , repeat=1 , number=5) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average _SCREAMING_SNAKE_CASE : Union[str, Any] = timeit.repeat( _UpperCAmelCase , repeat=self.args.repeat , number=1_0 , ) return min(_UpperCAmelCase) / 1_0.0 except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""") def _lowerCAmelCase ( self : Any , _A : Callable[[], None]): """simple docstring""" logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""") with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""") _SCREAMING_SNAKE_CASE : List[str] = start_memory_tracing("""transformers""") if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""") elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won\'t log GPU memory usage. 
""" """Install py3nvml (pip install py3nvml) to log information about GPU.""") _SCREAMING_SNAKE_CASE : Dict = 'N/A' else: logger.info( """Measuring total GPU usage on GPU device. Make sure to not have additional processes""" """ running on the same GPU.""") # init nvml nvml.nvmlInit() func() _SCREAMING_SNAKE_CASE : List[str] = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) _SCREAMING_SNAKE_CASE : Dict = nvml.nvmlDeviceGetMemoryInfo(_UpperCAmelCase) _SCREAMING_SNAKE_CASE : Union[str, Any] = meminfo.used _SCREAMING_SNAKE_CASE : Any = Memory(_UpperCAmelCase) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""") _SCREAMING_SNAKE_CASE : Dict = None else: _SCREAMING_SNAKE_CASE : str = measure_peak_memory_cpu(_UpperCAmelCase) _SCREAMING_SNAKE_CASE : List[str] = Memory(_UpperCAmelCase) if isinstance(_UpperCAmelCase , _UpperCAmelCase) else memory_bytes if self.args.trace_memory_line_by_line: _SCREAMING_SNAKE_CASE : Tuple = stop_memory_tracing(_UpperCAmelCase) if memory is None: _SCREAMING_SNAKE_CASE : Optional[int] = summary.total else: _SCREAMING_SNAKE_CASE : Any = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f"""Doesn't fit on GPU. {e}""") return "N/A", None
338
import copy from ...configuration_utils import PretrainedConfig from ...utils import logging a = logging.get_logger(__name__) class UpperCAmelCase_ (snake_case__ ): """simple docstring""" lowerCamelCase : Dict = 'encoder-decoder' lowerCamelCase : Optional[Any] = True def __init__( self: str , **_UpperCAmelCase: int ): super().__init__(**_UpperCAmelCase ) assert ( "encoder" in kwargs and "decoder" in kwargs ), "Config has to be initialized with encoder and decoder config" _lowerCAmelCase :Optional[Any] = kwargs.pop('encoder' ) _lowerCAmelCase :Dict = encoder_config.pop('model_type' ) _lowerCAmelCase :str = kwargs.pop('decoder' ) _lowerCAmelCase :str = decoder_config.pop('model_type' ) from ..auto.configuration_auto import AutoConfig _lowerCAmelCase :str = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Tuple = AutoConfig.for_model(_UpperCAmelCase , **_UpperCAmelCase ) _lowerCAmelCase :Any = True @classmethod def SCREAMING_SNAKE_CASE__ ( cls: Tuple , _UpperCAmelCase: PretrainedConfig , _UpperCAmelCase: PretrainedConfig , **_UpperCAmelCase: str ): logger.info('Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' ) _lowerCAmelCase :Dict = True _lowerCAmelCase :List[str] = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Union[str, Any] = copy.deepcopy(self.__dict__ ) _lowerCAmelCase :Optional[int] = self.encoder.to_dict() _lowerCAmelCase :Union[str, Any] = self.decoder.to_dict() _lowerCAmelCase :List[str] = self.__class__.model_type return output
687
0
from dataclasses import dataclass, field from typing import Optional @dataclass class __a : __snake_case : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be trained."""} ) __snake_case : Optional[str] = field( default="""./""" ,metadata={"""help""": """Save dir where model repo is cloned and models updates are saved to."""} ) __snake_case : Optional[str] = field( default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path of training dataset."""} ) __snake_case : Optional[str] = field( default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} ) __snake_case : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size for training."""} ) __snake_case : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size for evaluation."""} ) __snake_case : Optional[float] = field(default=0.1 ,metadata={"""help""": """Value of weight decay."""} ) __snake_case : Optional[int] = field( default=1_0000 ,metadata={"""help""": """Size of buffer used to shuffle streaming dataset."""} ) __snake_case : Optional[float] = field(default=2e-4 ,metadata={"""help""": """Learning rate fo training."""} ) __snake_case : Optional[str] = field(default="""cosine""" ,metadata={"""help""": """Learning rate."""} ) __snake_case : Optional[int] = field( default=750 ,metadata={"""help""": """Number of warmup steps in the learning rate schedule."""} ) __snake_case : Optional[int] = field( default=16 ,metadata={"""help""": """Number of gradient accumulation steps."""} ) __snake_case : Optional[bool] = field( default=snake_case__ ,metadata={"""help""": """Use gradient checkpointing to reduce memory footprint."""} ) __snake_case : Optional[int] = field(default=5_0000 ,metadata={"""help""": """Maximum number of training steps."""} ) __snake_case : Optional[int] = field( default=-1 ,metadata={"""help""": """Maximum number of evaluation 
steps. If -1 the full dataset is evaluated."""} ) __snake_case : Optional[int] = field(default=1024 ,metadata={"""help""": """Sequence lengths used for training."""} ) __snake_case : Optional[int] = field(default=1 ,metadata={"""help""": """Training seed."""} ) __snake_case : Optional[int] = field( default=1024 ,metadata={"""help""": """Interval to save checkpoints. Measured as number of forward passes not training steps."""} ,) __snake_case : Optional[str] = field( default=snake_case__ ,metadata={"""help""": """States path if the training should continue from a checkpoint folder."""} ) __snake_case : Optional[bool] = field(default=snake_case__ ,metadata={"""help""": """If True the data is pretokenized."""} ) @dataclass class __a : __snake_case : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} ) __snake_case : Optional[str] = field( default="""codeparrot/codeparrot-clean-valid""" ,metadata={"""help""": """Name or path of validation dataset."""} ) __snake_case : Optional[int] = field(default=2 ,metadata={"""help""": """Batch size used for evaluation."""} ) __snake_case : Optional[int] = field( default=-1 ,metadata={"""help""": """Maximum number of evaluation steps. If -1 the full dataset is evaluated."""} ) __snake_case : Optional[int] = field(default=1024 ,metadata={"""help""": """Length of sequences to be evaluated."""} ) __snake_case : Optional[int] = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} ) @dataclass class __a : __snake_case : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Model name or path of model to be evaluated."""} ) __snake_case : Optional[int] = field(default=snake_case__ ,metadata={"""help""": """Number of workers used for code evaluation."""} ) __snake_case : Optional[int] = field( default=snake_case__ ,metadata={"""help""": """The number of human-eval tasks to run. 
If not included all tasks are evaluated."""} ,) __snake_case : Optional[bool] = field( default=snake_case__ ,metadata={"""help""": """Sample from the language model\'s output distribution."""} ) __snake_case : Optional[float] = field(default=0.2 ,metadata={"""help""": """Sampling temperature used for generation."""} ) __snake_case : Optional[int] = field(default=256 ,metadata={"""help""": """Maximum number of newly generated tokens."""} ) __snake_case : Optional[int] = field(default=0 ,metadata={"""help""": """Top-k parameter used for generation."""} ) __snake_case : Optional[float] = field(default=0.95 ,metadata={"""help""": """Top-p parameter used for nucleus sampling."""} ) __snake_case : Optional[int] = field(default=10 ,metadata={"""help""": """Number of generations to run in parallel."""} ) __snake_case : Optional[int] = field( default=200 ,metadata={"""help""": """Number of completions to generate for each sample."""} ) __snake_case : Optional[int] = field(default=1 ,metadata={"""help""": """Random seed used for evaluation."""} ) __snake_case : Optional[str] = field( default="""eval_results.json""" ,metadata={"""help""": """Random seed used for evaluation."""} ) __snake_case : Optional[str] = field( default="""0""" ,metadata={"""help""": """Allow `code_eval` to execute Python code on machine"""} ) __snake_case : Optional[int] = field( default=-1 ,metadata={ """help""": ( """Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive""" """ number corresponds to which GPU device id to run on.""" ) } ,) @dataclass class __a : __snake_case : Optional[int] = field( default=snake_case__ ,metadata={ """help""": """The number of CPU cores to use for parallel preprocessing. 
Default uses the maximum available.""" } ,) __snake_case : Optional[str] = field( default="""transformersbook/codeparrot""" ,metadata={"""help""": """Folder or name of dataset to process."""} ) __snake_case : Optional[str] = field( default="""codeparrot-clean""" ,metadata={"""help""": """Folder to save processed processed dataset."""} ) __snake_case : Optional[int] = field( default=10_0000 ,metadata={"""help""": """Number of files to save per JSON output file."""} ) __snake_case : Optional[str] = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} ) __snake_case : Optional[float] = field( default=1000 ,metadata={"""help""": """Maximum line length in file, otherwise file is filtered."""} ) __snake_case : Optional[float] = field( default=100 ,metadata={"""help""": """Maximum mean line length in file, otherwise file is filtered."""} ) __snake_case : Optional[float] = field( default=0.25 ,metadata={"""help""": """Maximum fraction of non-alphanumeric characters, otherwise file is filtered."""} ) __snake_case : Optional[float] = field( default=1.5 ,metadata={"""help""": """Minimum character token ratio for the file, otherwise file is filtered."""} ) __snake_case : Optional[float] = field( default=0.7 ,metadata={"""help""": """Probability for filtering config, test and uncommon files."""} ) __snake_case : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} ,) __snake_case : Optional[bool] = field( default=snake_case__ ,metadata={"""help""": """If True, near-duplicate samples are removed."""} ) __snake_case : Optional[float] = field( default=0.85 ,metadata={"""help""": """Jaccard threshold for near-duplicate samples."""} ) @dataclass class __a : __snake_case : Optional[str] = field( default="""gpt2""" ,metadata={"""help""": """Base tokenizer to build new tokenizer from."""} ) __snake_case : Optional[str] = field( default="""transformersbook/codeparrot-train""" 
,metadata={"""help""": """Dataset to train tokenizer on."""} ) __snake_case : Optional[str] = field(default="""content""" ,metadata={"""help""": """Column containing text data to process."""} ) __snake_case : Optional[int] = field(default=20_0000 ,metadata={"""help""": """Number of examples to train tokenizer on."""} ) __snake_case : Optional[int] = field( default=3_2768 ,metadata={"""help""": """Number of examples to train the tokenizer on."""} ) __snake_case : Optional[str] = field(default="""codeparrot""" ,metadata={"""help""": """Name of new tokenizer."""} ) __snake_case : Optional[bool] = field(default=snake_case__ ,metadata={"""help""": """Push saved tokenizer to the hub."""} ) @dataclass class __a : __snake_case : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Name or path to the tokenizer."""} ) __snake_case : Optional[str] = field( default="""codeparrot/codeparrot-clean-train""" ,metadata={"""help""": """Name or path to the dataset to pretokenize."""} ) __snake_case : Optional[str] = field( default="""tokenized-codeparrot-train""" ,metadata={"""help""": """Repo name of the pretokenized data."""} ) __snake_case : Optional[int] = field(default=snake_case__ ,metadata={"""help""": """Number of workers used for code evaluation."""} ) @dataclass class __a : __snake_case : Optional[str] = field( default="""gpt2-large""" ,metadata={"""help""": """Configuration to use for model initialization."""} ) __snake_case : Optional[str] = field( default="""codeparrot/codeparrot""" ,metadata={"""help""": """Tokenizer attached to model."""} ) __snake_case : Optional[str] = field(default="""codeparrot""" ,metadata={"""help""": """Name of the created model."""} ) __snake_case : Optional[bool] = field(default=snake_case__ ,metadata={"""help""": """Push saved tokenizer to the hub."""} )
600
import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase_ : """simple docstring""" def __init__( self: int , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=13 , _UpperCAmelCase: Optional[Any]=32 , _UpperCAmelCase: List[Any]=2 , _UpperCAmelCase: Optional[int]=3 , _UpperCAmelCase: Optional[int]=16 , _UpperCAmelCase: Optional[Any]=[32, 64, 128] , _UpperCAmelCase: Optional[int]=[1, 2, 1] , _UpperCAmelCase: int=[2, 2, 4] , _UpperCAmelCase: List[str]=2 , _UpperCAmelCase: Dict=2.0 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: str=0.0 , _UpperCAmelCase: int=0.0 , _UpperCAmelCase: str=0.1 , _UpperCAmelCase: Dict="gelu" , _UpperCAmelCase: Optional[Any]=False , _UpperCAmelCase: Union[str, Any]=True , _UpperCAmelCase: Union[str, Any]=0.0_2 , _UpperCAmelCase: Optional[int]=1e-5 , _UpperCAmelCase: Optional[int]=True , _UpperCAmelCase: Optional[Any]=None , _UpperCAmelCase: Tuple=True , _UpperCAmelCase: str=10 , _UpperCAmelCase: int=8 , _UpperCAmelCase: List[Any]=["stage1", "stage2"] , _UpperCAmelCase: List[Any]=[1, 2] , ): _lowerCAmelCase :Optional[int] = parent _lowerCAmelCase :Dict = batch_size _lowerCAmelCase 
:Optional[Any] = image_size _lowerCAmelCase :Optional[Any] = patch_size _lowerCAmelCase :List[Any] = num_channels _lowerCAmelCase :Optional[int] = embed_dim _lowerCAmelCase :List[str] = hidden_sizes _lowerCAmelCase :Union[str, Any] = depths _lowerCAmelCase :int = num_heads _lowerCAmelCase :Any = window_size _lowerCAmelCase :List[Any] = mlp_ratio _lowerCAmelCase :Optional[int] = qkv_bias _lowerCAmelCase :Union[str, Any] = hidden_dropout_prob _lowerCAmelCase :Optional[int] = attention_probs_dropout_prob _lowerCAmelCase :Dict = drop_path_rate _lowerCAmelCase :List[Any] = hidden_act _lowerCAmelCase :Tuple = use_absolute_embeddings _lowerCAmelCase :Optional[int] = patch_norm _lowerCAmelCase :Optional[Any] = layer_norm_eps _lowerCAmelCase :Union[str, Any] = initializer_range _lowerCAmelCase :List[str] = is_training _lowerCAmelCase :str = scope _lowerCAmelCase :Optional[int] = use_labels _lowerCAmelCase :List[Any] = type_sequence_label_size _lowerCAmelCase :Union[str, Any] = encoder_stride _lowerCAmelCase :Optional[int] = out_features _lowerCAmelCase :List[str] = out_indices def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _lowerCAmelCase :Dict = None if self.use_labels: _lowerCAmelCase :List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _lowerCAmelCase :str = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE__ ( self: int ): return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , 
use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Tuple ): _lowerCAmelCase :List[Any] = FocalNetModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) _lowerCAmelCase :List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Union[str, Any] = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None _lowerCAmelCase :Optional[int] = None _lowerCAmelCase :Dict = FocalNetBackbone(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Any = model(_UpperCAmelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify 
channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: int , _UpperCAmelCase: Optional[Any] ): _lowerCAmelCase :Any = FocalNetForMaskedImageModeling(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :str = model(_UpperCAmelCase ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images _lowerCAmelCase :List[Any] = 1 _lowerCAmelCase :List[Any] = FocalNetForMaskedImageModeling(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :int = model(_UpperCAmelCase ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int , _UpperCAmelCase: Dict , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = self.type_sequence_label_size _lowerCAmelCase :Dict = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Union[str, Any] = model(_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _lowerCAmelCase :Optional[int] = 1 _lowerCAmelCase :Tuple = FocalNetForImageClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() _lowerCAmelCase :Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _lowerCAmelCase :List[str] = model(_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = 
self.prepare_config_and_inputs() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :str = config_and_inputs _lowerCAmelCase :List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCAmelCase_ (snake_case__ , snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Optional[int] = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowerCamelCase : Optional[Any] = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowerCamelCase : Tuple = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Any = False lowerCamelCase : List[Any] = False def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :Tuple = FocalNetModelTester(self ) _lowerCAmelCase :str = ConfigTester(self , config_class=_UpperCAmelCase , embed_dim=37 , has_text_modality=_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): return def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): _lowerCAmelCase :List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: int ): _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: List[str] ): _lowerCAmelCase :Optional[int] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: str ): _lowerCAmelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ): pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def SCREAMING_SNAKE_CASE__ ( self: str ): pass def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[Any] = model_class(_UpperCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _lowerCAmelCase :Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase , _lowerCAmelCase :Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Tuple = model_class(_UpperCAmelCase ) _lowerCAmelCase :Tuple = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _lowerCAmelCase :int = [*signature.parameters.keys()] _lowerCAmelCase :List[str] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Any , _UpperCAmelCase: int , _UpperCAmelCase: Union[str, Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Optional[int] ): _lowerCAmelCase :Union[str, Any] = model_class(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() with torch.no_grad(): _lowerCAmelCase :Optional[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) ) _lowerCAmelCase :List[Any] = 
outputs.hidden_states _lowerCAmelCase :str = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) # FocalNet has a different seq_length _lowerCAmelCase :Any = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) _lowerCAmelCase :List[str] = outputs.reshaped_hidden_states self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :int = reshaped_hidden_states[0].shape _lowerCAmelCase :Optional[int] = ( reshaped_hidden_states[0].view(_UpperCAmelCase , _UpperCAmelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase , _lowerCAmelCase :Any = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :List[str] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :Optional[int] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Dict = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase , _lowerCAmelCase :str = self.model_tester.prepare_config_and_inputs_for_common() 
_lowerCAmelCase :str = 3 _lowerCAmelCase :Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) _lowerCAmelCase :int = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) _lowerCAmelCase :Tuple = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _lowerCAmelCase :Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _lowerCAmelCase :List[str] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _lowerCAmelCase :Union[str, Any] = True self.check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , (padded_height, padded_width) ) @slow def SCREAMING_SNAKE_CASE__ ( self: int ): for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _lowerCAmelCase :List[Any] = FocalNetModel.from_pretrained(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple ): _lowerCAmelCase , _lowerCAmelCase :int = self.model_tester.prepare_config_and_inputs_for_common() _lowerCAmelCase :Optional[int] = _config_zero_init(_UpperCAmelCase ) for model_class in self.all_model_classes: _lowerCAmelCase :str = model_class(config=_UpperCAmelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" @cached_property def SCREAMING_SNAKE_CASE__ ( self: Dict ): # TODO update organization 
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def SCREAMING_SNAKE_CASE__ ( self: Any ): _lowerCAmelCase :Tuple = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = self.default_image_processor _lowerCAmelCase :Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _lowerCAmelCase :Any = image_processor(images=_UpperCAmelCase , return_tensors='pt' ).to(_UpperCAmelCase ) # forward pass with torch.no_grad(): _lowerCAmelCase :Dict = model(**_UpperCAmelCase ) # verify the logits _lowerCAmelCase :str = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _UpperCAmelCase ) _lowerCAmelCase :Dict = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(_UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class UpperCAmelCase_ (snake_case__ , unittest.TestCase ): """simple docstring""" lowerCamelCase : int = (FocalNetBackbone,) if is_torch_available() else () lowerCamelCase : str = FocalNetConfig lowerCamelCase : Union[str, Any] = False def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :Any = FocalNetModelTester(self )
687
0
'''simple docstring''' import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel from ...utils import logging lowercase =logging.get_logger(__name__) def lowerCamelCase__ ( __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ): '''simple docstring''' _UpperCAmelCase : Optional[Any] =nn.functional.normalize(__lowerCamelCase ) _UpperCAmelCase : List[str] =nn.functional.normalize(__lowerCamelCase ) return torch.mm(__lowerCamelCase , normalized_text_embeds.t() ) class __magic_name__ ( snake_case__ ): UpperCAmelCase =CLIPConfig UpperCAmelCase =['CLIPEncoderLayer'] def __init__( self , snake_case) -> Optional[int]: '''simple docstring''' super().__init__(_UpperCAmelCase) _UpperCAmelCase : Any =CLIPVisionModel(config.vision_config) _UpperCAmelCase : Optional[int] =nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=_UpperCAmelCase) _UpperCAmelCase : int =nn.Parameter(torch.ones(1_7 , config.projection_dim) , requires_grad=_UpperCAmelCase) _UpperCAmelCase : Any =nn.Parameter(torch.ones(3 , config.projection_dim) , requires_grad=_UpperCAmelCase) _UpperCAmelCase : str =nn.Parameter(torch.ones(1_7) , requires_grad=_UpperCAmelCase) _UpperCAmelCase : Optional[int] =nn.Parameter(torch.ones(3) , requires_grad=_UpperCAmelCase) @torch.no_grad() def lowerCAmelCase ( self , snake_case , snake_case) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : str =self.vision_model(_UpperCAmelCase)[1] # pooled_output _UpperCAmelCase : Union[str, Any] =self.visual_projection(_UpperCAmelCase) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 _UpperCAmelCase : Optional[int] =cosine_distance(_UpperCAmelCase , self.special_care_embeds).cpu().float().numpy() _UpperCAmelCase : List[str] =cosine_distance(_UpperCAmelCase , self.concept_embeds).cpu().float().numpy() _UpperCAmelCase : str =[] _UpperCAmelCase : List[Any] =image_embeds.shape[0] for i in 
range(_UpperCAmelCase): _UpperCAmelCase : Optional[Any] ={'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []} # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign images _UpperCAmelCase : List[Any] =0.0 for concept_idx in range(len(special_cos_dist[0])): _UpperCAmelCase : List[Any] =special_cos_dist[i][concept_idx] _UpperCAmelCase : Dict =self.special_care_embeds_weights[concept_idx].item() _UpperCAmelCase : List[Any] =round(concept_cos - concept_threshold + adjustment , 3) if result_img["special_scores"][concept_idx] > 0: result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]}) _UpperCAmelCase : Any =0.01 for concept_idx in range(len(cos_dist[0])): _UpperCAmelCase : Union[str, Any] =cos_dist[i][concept_idx] _UpperCAmelCase : str =self.concept_embeds_weights[concept_idx].item() _UpperCAmelCase : str =round(concept_cos - concept_threshold + adjustment , 3) if result_img["concept_scores"][concept_idx] > 0: result_img["bad_concepts"].append(_UpperCAmelCase) result.append(_UpperCAmelCase) _UpperCAmelCase : Any =[len(res['bad_concepts']) > 0 for res in result] return images, has_nsfw_concepts @torch.no_grad() def lowerCAmelCase ( self , snake_case , snake_case) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : Optional[int] =self.vision_model(_UpperCAmelCase)[1] # pooled_output _UpperCAmelCase : Union[str, Any] =self.visual_projection(_UpperCAmelCase) _UpperCAmelCase : Dict =cosine_distance(_UpperCAmelCase , self.special_care_embeds) _UpperCAmelCase : List[str] =cosine_distance(_UpperCAmelCase , self.concept_embeds) # increase this value to create a stronger `nsfw` filter # at the cost of increasing the possibility of filtering benign images _UpperCAmelCase : Any =0.0 _UpperCAmelCase : Union[str, Any] =special_cos_dist - self.special_care_embeds_weights + adjustment # special_scores = special_scores.round(decimals=3) 
_UpperCAmelCase : Tuple =torch.any(special_scores > 0 , dim=1) _UpperCAmelCase : List[str] =special_care * 0.01 _UpperCAmelCase : Any =special_adjustment.unsqueeze(1).expand(-1 , cos_dist.shape[1]) _UpperCAmelCase : Optional[Any] =(cos_dist - self.concept_embeds_weights) + special_adjustment # concept_scores = concept_scores.round(decimals=3) _UpperCAmelCase : List[str] =torch.any(concept_scores > 0 , dim=1) return images, has_nsfw_concepts
446
import random import torch from huggingface_hub import HfApi from diffusers import UNetaDModel a = HfApi() a = {} # fmt: off a = torch.tensor([ -0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7, 1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9, -1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9, 0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7 ]) a = torch.tensor([ -2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6, 1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8, -2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8, 2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5 ]) a = torch.tensor([ -0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9, -0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4, -0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5, 0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3 ]) a = torch.tensor([ 0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2, -0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9, 0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5, -0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5 ]) a = torch.tensor([ 0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3, -0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5, 0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9, -0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6 ]) a = torch.tensor([ 0.1_1_5_4, 0.1_2_1_8, 
0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8, -0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0, 0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3, -0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1 ]) a = torch.tensor([ 0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2, -0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8, 0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4, -0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0 ]) a = torch.tensor([ 0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2, -0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0, 0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6, -0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3 ]) a = torch.tensor([ -1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0, 1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3, -2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0, 1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1]) a = torch.tensor([ -1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4, 0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1, -2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9, 1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6 ]) a = torch.tensor([ -1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2, 0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7, 
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1, 1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5 ]) a = torch.tensor([ -2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9, 1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1, -3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1, 3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6 ]) a = torch.tensor([ -2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0, 1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8, -2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5, 2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3 ]) a = torch.tensor([ -2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6, 1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8, -3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0, 3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3 ]) a = torch.tensor([ -1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4, 1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1, -2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9, 1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9 ]) # fmt: on a = api.list_models(filter="""diffusers""") for mod in models: if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256": a = """/home/patrick/google_checkpoints/""" + mod.modelId.split("""/""")[-1] print(F'''Started running {mod.modelId}!!!''') if mod.modelId.startswith("""CompVis"""): a = UNetaDModel.from_pretrained(local_checkpoint, 
subfolder="""unet""") else: a = UNetaDModel.from_pretrained(local_checkpoint) torch.manual_seed(0) random.seed(0) a = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) a = torch.tensor([10] * noise.shape[0]) with torch.no_grad(): a = model(noise, time_step).sample assert torch.allclose( logits[0, 0, 0, :30], results["""_""".join("""_""".join(mod.modelId.split("""/""")).split("""-"""))], atol=1E-3 ) print(F'''{mod.modelId} has passed successfully!!!''')
687
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it exports.
# _LazyModule below reads this mapping, so it MUST be named `_import_structure`
# (the previous code bound the dict and the backend lists to throwaway names
# and then referenced `_import_structure`, raising NameError on import).
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed classes are only exported when torch is installed.
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax-backed classes are only exported when flax is installed.
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
101
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for the summarization preprocessing helpers.

    NOTE(review): in the previous version every method shared one name, so
    Python kept only the last definition and unittest discovered none of them
    (no ``test_`` prefix).  Each check below is restored as its own ``test_*``
    method so the runner actually executes it.
    """

    def setUp(self):
        # Target length used by truncate_or_pad in the tests below.
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Sequences shorter than block_size are right-padded with the pad index."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Sequences of exactly block_size are returned unchanged."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Sequences longer than block_size are truncated."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields no summary lines."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story yields neither story nor summary lines."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        """Sentences lacking a trailing period get one appended."""
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        """With no pad tokens present the mask is all ones."""
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        """Positions holding the pad token id are masked out."""
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        """Pad id 1 must not mask non-pad tokens that merely equal other values."""
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        """Token type ids alternate at every separator occurrence."""
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
687
0
'''simple docstring'''


def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count positive n-digit integers that are also nth powers.

    Counts pairs (base, power) with 1 <= base < max_base and
    1 <= power < max_power such that base**power has exactly `power` digits.

    Args:
        max_base: exclusive upper bound for the base (default 10).
        max_power: exclusive upper bound for the exponent (default 22).

    Returns:
        The number of qualifying powers (49 for the defaults).
    """
    # Fixes vs. previous version: the two parameters shared one name
    # (a SyntaxError), the def was named `_a` while __main__ called
    # `solution`, and the return annotation claimed Dict for an int.
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f'''{solution(10, 22) = }''')
56
def perfect(number: int) -> bool:
    """Return True if *number* equals the sum of its proper positive divisors.

    A perfect number (6, 28, 496, ...) is a positive integer equal to the sum
    of its divisors excluding itself.

    Args:
        number: the integer to test.

    Returns:
        True if *number* is perfect, False otherwise.
    """
    # Non-positive integers are never perfect; without this guard the empty
    # divisor sum (0) would wrongly classify 0 as perfect.
    if number < 1:
        return False
    # Proper divisors never exceed number // 2, so that bound suffices.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("""Program to check whether a number is a Perfect number or not...""")
    # Bind the input to `number`, which the f-string below reads (the previous
    # version bound it to a different name and raised NameError here).
    number = int(input("""Enter number: """).strip())
    print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
687
0
# Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar SCREAMING_SNAKE_CASE = TypeVar('T') class A_ ( Generic[T] ): '''simple docstring''' def __init__( self , _A = True) -> str: """simple docstring""" _UpperCAmelCase : dict[T, list[T]] = {} # dictionary of lists _UpperCAmelCase : int = directed def snake_case__ ( self , _A , _A) -> Tuple: """simple docstring""" if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase) self.adj_list[destination_vertex].append(_UpperCAmelCase) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase) _UpperCAmelCase : Dict = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. 
elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(_UpperCAmelCase) _UpperCAmelCase : Optional[Any] = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: _UpperCAmelCase : Tuple = [destination_vertex] _UpperCAmelCase : Tuple = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(_UpperCAmelCase) _UpperCAmelCase : Union[str, Any] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: _UpperCAmelCase : int = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: _UpperCAmelCase : Any = [destination_vertex] _UpperCAmelCase : List[Any] = [] return self def __repr__( self) -> int: """simple docstring""" return pformat(self.adj_list)
485
from __future__ import annotations from collections.abc import MutableSequence class UpperCAmelCase_ : """simple docstring""" def __init__( self: List[Any] , _UpperCAmelCase: int , _UpperCAmelCase: MutableSequence[float] ): if len(_UpperCAmelCase ) != degree + 1: raise ValueError( 'The number of coefficients should be equal to the degree + 1.' ) _lowerCAmelCase :list[float] = list(_UpperCAmelCase ) _lowerCAmelCase :Optional[Any] = degree def __add__( self: str , _UpperCAmelCase: Polynomial ): if self.degree > polynomial_a.degree: _lowerCAmelCase :Any = self.coefficients[:] for i in range(polynomial_a.degree + 1 ): coefficients[i] += polynomial_a.coefficients[i] return Polynomial(self.degree , _UpperCAmelCase ) else: _lowerCAmelCase :List[Any] = polynomial_a.coefficients[:] for i in range(self.degree + 1 ): coefficients[i] += self.coefficients[i] return Polynomial(polynomial_a.degree , _UpperCAmelCase ) def __sub__( self: str , _UpperCAmelCase: Polynomial ): return self + polynomial_a * Polynomial(0 , [-1] ) def __neg__( self: Union[str, Any] ): return Polynomial(self.degree , [-c for c in self.coefficients] ) def __mul__( self: int , _UpperCAmelCase: Polynomial ): _lowerCAmelCase :list[float] = [0] * (self.degree + polynomial_a.degree + 1) for i in range(self.degree + 1 ): for j in range(polynomial_a.degree + 1 ): coefficients[i + j] += ( self.coefficients[i] * polynomial_a.coefficients[j] ) return Polynomial(self.degree + polynomial_a.degree , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Tuple , _UpperCAmelCase: int | float ): _lowerCAmelCase :int | float = 0 for i in range(self.degree + 1 ): result += self.coefficients[i] * (substitution**i) return result def __str__( self: Union[str, Any] ): _lowerCAmelCase :Dict = '' for i in range(self.degree , -1 , -1 ): if self.coefficients[i] == 0: continue elif self.coefficients[i] > 0: if polynomial: polynomial += " + " else: polynomial += " - " if i == 0: polynomial += str(abs(self.coefficients[i] ) ) elif i == 
1: polynomial += str(abs(self.coefficients[i] ) ) + "x" else: polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(_UpperCAmelCase ) return polynomial def __repr__( self: Optional[Any] ): return self.__str__() def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ): _lowerCAmelCase :list[float] = [0] * self.degree for i in range(self.degree ): _lowerCAmelCase :Tuple = self.coefficients[i + 1] * (i + 1) return Polynomial(self.degree - 1 , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] , _UpperCAmelCase: int | float = 0 ): _lowerCAmelCase :list[float] = [0] * (self.degree + 2) _lowerCAmelCase :str = constant for i in range(self.degree + 1 ): _lowerCAmelCase :List[str] = self.coefficients[i] / (i + 1) return Polynomial(self.degree + 1 , _UpperCAmelCase ) def __eq__( self: List[Any] , _UpperCAmelCase: object ): if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): return False if self.degree != polynomial_a.degree: return False for i in range(self.degree + 1 ): if self.coefficients[i] != polynomial_a.coefficients[i]: return False return True def __ne__( self: Optional[Any] , _UpperCAmelCase: object ): return not self.__eq__(_UpperCAmelCase )
687
0
"""simple docstring""" def UpperCAmelCase ( A : float ): '''simple docstring''' return 10 - x * x def UpperCAmelCase ( A : float , A : float ): '''simple docstring''' if equation(A ) * equation(A ) >= 0: raise ValueError('Wrong space!' ) _UpperCAmelCase = a while (b - a) >= 0.01: # Find middle point _UpperCAmelCase = (a + b) / 2 # Check if middle point is root if equation(A ) == 0.0: break # Decide the side to repeat the steps if equation(A ) * equation(A ) < 0: _UpperCAmelCase = c else: _UpperCAmelCase = c return c if __name__ == "__main__": import doctest doctest.testmod() print(bisection(-2, 5)) print(bisection(0, 6))
573
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


# Lazy-import structure: maps submodule name -> public names it exports.
# _LazyModule below reads this mapping, so it MUST be named `_import_structure`
# (the previous code bound the dict and the backend lists to throwaway names
# and then referenced `_import_structure`, raising NameError on import).
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch-backed classes are only exported when torch is installed.
    _import_structure["modeling_gpt_neo"] = [
        "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoForCausalLM",
        "GPTNeoForQuestionAnswering",
        "GPTNeoForSequenceClassification",
        "GPTNeoForTokenClassification",
        "GPTNeoModel",
        "GPTNeoPreTrainedModel",
        "load_tf_weights_in_gpt_neo",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax-backed classes are only exported when flax is installed.
    _import_structure["modeling_flax_gpt_neo"] = [
        "FlaxGPTNeoForCausalLM",
        "FlaxGPTNeoModel",
        "FlaxGPTNeoPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neo import (
            GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoForCausalLM,
            GPTNeoForQuestionAnswering,
            GPTNeoForSequenceClassification,
            GPTNeoForTokenClassification,
            GPTNeoModel,
            GPTNeoPreTrainedModel,
            load_tf_weights_in_gpt_neo,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
687
0
from __future__ import annotations from math import pi, sqrt def __lowerCAmelCase ( A_ : float , A_ : float ) -> Optional[int]: if inductance <= 0: raise ValueError("Inductance cannot be 0 or negative" ) elif capacitance <= 0: raise ValueError("Capacitance cannot be 0 or negative" ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
221
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def UpperCamelCase_( __magic_name__ : str , __magic_name__ : float | Decimal , __magic_name__ : float = 10**-10 ): """simple docstring""" _lowerCAmelCase :Optional[Any] = a while True: _lowerCAmelCase :str = Decimal(__magic_name__ ) - ( Decimal(eval(__magic_name__ ) ) / Decimal(eval(str(diff(__magic_name__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(__magic_name__ ) ) < precision: # noqa: S307 return float(__magic_name__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''') # Find root of polynomial print(F'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}''') # Find Square Root of 5 print(F'''The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}''') # Exponential Roots print(F'''The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}''')
687
0
'''simple docstring''' import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging SCREAMING_SNAKE_CASE = logging.get_logger(__name__) logging.set_verbosity_info() def lowercase_ ( __A : str , __A : str ) -> Optional[Any]: """simple docstring""" if "xprophetnet" in prophetnet_checkpoint_path: lowercase : str =XLMProphetNetForConditionalGenerationOld.from_pretrained(__A ) lowercase : Union[str, Any] =XLMProphetNetForConditionalGeneration.from_pretrained( __A , output_loading_info=__A ) else: lowercase : str =ProphetNetForConditionalGenerationOld.from_pretrained(__A ) lowercase : Dict =ProphetNetForConditionalGeneration.from_pretrained( __A , output_loading_info=__A ) lowercase : Any =['key_proj', 'value_proj', 'query_proj'] lowercase : Optional[Any] ={ 'self_attn': 'ngram_self_attn', 'cross_attn': 'encoder_attn', 'cross_attn_layer_norm': 'encoder_attn_layer_norm', 'feed_forward_layer_norm': 'final_layer_norm', 'feed_forward': '', 'intermediate': 'fc1', 'output': 'fc2', 'key_proj': 'k_proj', 'query_proj': 'q_proj', 'value_proj': 'v_proj', 'word_embeddings': 'embed_tokens', 'embeddings_layer_norm': 'emb_layer_norm', 'relative_pos_embeddings': 'relative_linear', 'ngram_embeddings': 'ngram_input_embed', 'position_embeddings': 'embed_positions', } for key in loading_info["missing_keys"]: lowercase : str =key.split('''.''' ) if attributes[0] == "lm_head": lowercase : Optional[Any] =prophet lowercase : Dict =prophet_old else: lowercase : Tuple 
=prophet.prophetnet lowercase : int =prophet_old.model lowercase : Dict =False for attribute in attributes: if attribute in mapping: lowercase : Tuple =mapping[attribute] if not hasattr(__A , __A ) and len(__A ) > 0: lowercase : Tuple =attribute elif hasattr(__A , __A ): lowercase : Dict =attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" lowercase : str =old_model.weight logger.info(F'{attribute} is initialized.' ) lowercase : Optional[int] =True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" lowercase : str =old_model.bias logger.info(F'{attribute} is initialized' ) lowercase : int =True break elif attribute in special_keys and hasattr(__A , '''in_proj_weight''' ): lowercase : Optional[Any] =old_model.in_proj_weight.shape[0] // 3 lowercase : Optional[Any] =getattr(__A , __A ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": lowercase : Union[str, Any] =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) lowercase : str =nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": lowercase : List[str] =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) lowercase : List[Any] =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": lowercase : str =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) lowercase : List[str] =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) lowercase : Optional[Any] =True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." 
lowercase : Optional[int] =nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) lowercase : List[str] =True break if attribute.isdigit(): lowercase : int =model[int(__A )] lowercase : Union[str, Any] =old_model[int(__A )] else: lowercase : Dict =getattr(__A , __A ) if old_attribute == "": lowercase : str =old_model else: if not hasattr(__A , __A ): raise ValueError(F'{old_model} does not have {old_attribute}' ) lowercase : str =getattr(__A , __A ) if not is_key_init: raise ValueError(F'{key} was not correctly initialized!' ) print(F'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(__A ) if __name__ == "__main__": SCREAMING_SNAKE_CASE = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) SCREAMING_SNAKE_CASE = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
94
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a = { """sample_size""": 32, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": 1_000, """block_out_channels""": [32, 64], """attention_head_dim""": 8, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a = { """sample_size""": 64, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 3, """num_class_embeds""": 1_000, """block_out_channels""": [192, 192 * 2, 192 * 3, 192 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """scale_shift""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a = { """sample_size""": 256, """in_channels""": 3, """out_channels""": 3, """layers_per_block""": 2, """num_class_embeds""": None, """block_out_channels""": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], """attention_head_dim""": 64, """down_block_types""": [ """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """ResnetDownsampleBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", """AttnDownBlock2D""", ], """up_block_types""": [ """AttnUpBlock2D""", """AttnUpBlock2D""", """AttnUpBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", """ResnetUpsampleBlock2D""", ], """resnet_time_scale_shift""": """default""", """upsample_type""": """resnet""", """downsample_type""": """resnet""", } a = { """num_train_timesteps""": 40, """sigma_min""": 0.0_0_2, 
"""sigma_max""": 8_0.0, } a = { """num_train_timesteps""": 201, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } a = { """num_train_timesteps""": 151, """sigma_min""": 0.0_0_2, """sigma_max""": 8_0.0, } def UpperCamelCase_( __magic_name__ : Dict ): """simple docstring""" if isinstance(__magic_name__ , __magic_name__ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('boolean value expected' ) def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Union[str, Any] , __magic_name__ : List[Any] , __magic_name__ : List[Any]=False ): """simple docstring""" _lowerCAmelCase :int = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] _lowerCAmelCase :Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] _lowerCAmelCase :str = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] _lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] _lowerCAmelCase :str = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] _lowerCAmelCase :str = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] _lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] _lowerCAmelCase :Optional[int] = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] _lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: _lowerCAmelCase :List[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""] _lowerCAmelCase :int = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def UpperCamelCase_( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] , __magic_name__ : List[str]=None ): """simple docstring""" _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 
, dim=0 ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase :Any = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) _lowerCAmelCase :int = checkpoint[f"""{old_prefix}.norm.weight"""] _lowerCAmelCase :Dict = checkpoint[f"""{old_prefix}.norm.bias"""] _lowerCAmelCase :Dict = weight_q.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :str = bias_q.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :List[str] = weight_k.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :Tuple = weight_v.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :List[Any] = bias_v.squeeze(-1 ).squeeze(-1 ) _lowerCAmelCase :int = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) _lowerCAmelCase :Optional[Any] = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def UpperCamelCase_( __magic_name__ : str , __magic_name__ : Optional[Any] ): """simple docstring""" _lowerCAmelCase :Union[str, Any] = torch.load(__magic_name__ , map_location='cpu' ) _lowerCAmelCase :List[Any] = {} _lowerCAmelCase :List[str] = checkpoint['time_embed.0.weight'] _lowerCAmelCase :Tuple = checkpoint['time_embed.0.bias'] _lowerCAmelCase :Dict = checkpoint['time_embed.2.weight'] _lowerCAmelCase :Union[str, Any] = checkpoint['time_embed.2.bias'] if unet_config["num_class_embeds"] is not None: _lowerCAmelCase :Union[str, Any] = checkpoint['label_emb.weight'] _lowerCAmelCase :str = checkpoint['input_blocks.0.0.weight'] _lowerCAmelCase :str = checkpoint['input_blocks.0.0.bias'] _lowerCAmelCase :List[Any] = unet_config['down_block_types'] _lowerCAmelCase :Any = unet_config['layers_per_block'] _lowerCAmelCase :List[Any] = unet_config['attention_head_dim'] _lowerCAmelCase :Tuple = unet_config['block_out_channels'] _lowerCAmelCase :List[str] = 1 _lowerCAmelCase :Optional[int] = channels_list[0] for i, layer_type in enumerate(__magic_name__ ): _lowerCAmelCase :Tuple = channels_list[i] _lowerCAmelCase :Optional[Any] = 
current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__magic_name__ ): _lowerCAmelCase :int = f"""down_blocks.{i}.resnets.{j}""" _lowerCAmelCase :List[Any] = f"""input_blocks.{current_layer}.0""" _lowerCAmelCase :int = True if j == 0 and downsample_block_has_skip else False _lowerCAmelCase :List[Any] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(__magic_name__ ): _lowerCAmelCase :List[str] = f"""down_blocks.{i}.resnets.{j}""" _lowerCAmelCase :Optional[int] = f"""input_blocks.{current_layer}.0""" _lowerCAmelCase :List[str] = True if j == 0 and downsample_block_has_skip else False _lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) _lowerCAmelCase :Optional[int] = f"""down_blocks.{i}.attentions.{j}""" _lowerCAmelCase :str = f"""input_blocks.{current_layer}.1""" _lowerCAmelCase :Optional[Any] = convert_attention( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) current_layer += 1 if i != len(__magic_name__ ) - 1: _lowerCAmelCase :Union[str, Any] = f"""down_blocks.{i}.downsamplers.0""" _lowerCAmelCase :Tuple = f"""input_blocks.{current_layer}.0""" _lowerCAmelCase :Optional[int] = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) current_layer += 1 _lowerCAmelCase :Dict = current_channels # hardcoded the mid-block for now _lowerCAmelCase :int = 'mid_block.resnets.0' _lowerCAmelCase :Optional[Any] = 'middle_block.0' _lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _lowerCAmelCase :Optional[int] = 'mid_block.attentions.0' _lowerCAmelCase :Optional[int] = 'middle_block.1' _lowerCAmelCase :List[Any] = convert_attention(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , 
__magic_name__ ) _lowerCAmelCase :Union[str, Any] = 'mid_block.resnets.1' _lowerCAmelCase :Optional[int] = 'middle_block.2' _lowerCAmelCase :int = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _lowerCAmelCase :Tuple = 0 _lowerCAmelCase :str = unet_config['up_block_types'] for i, layer_type in enumerate(__magic_name__ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): _lowerCAmelCase :Optional[Any] = f"""up_blocks.{i}.resnets.{j}""" _lowerCAmelCase :Dict = f"""output_blocks.{current_layer}.0""" _lowerCAmelCase :Any = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) current_layer += 1 if i != len(__magic_name__ ) - 1: _lowerCAmelCase :Any = f"""up_blocks.{i}.upsamplers.0""" _lowerCAmelCase :Dict = f"""output_blocks.{current_layer-1}.1""" _lowerCAmelCase :Tuple = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): _lowerCAmelCase :Tuple = f"""up_blocks.{i}.resnets.{j}""" _lowerCAmelCase :List[str] = f"""output_blocks.{current_layer}.0""" _lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , has_skip=__magic_name__ ) _lowerCAmelCase :str = f"""up_blocks.{i}.attentions.{j}""" _lowerCAmelCase :List[Any] = f"""output_blocks.{current_layer}.1""" _lowerCAmelCase :int = convert_attention( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) current_layer += 1 if i != len(__magic_name__ ) - 1: _lowerCAmelCase :Optional[int] = f"""up_blocks.{i}.upsamplers.0""" _lowerCAmelCase :int = f"""output_blocks.{current_layer-1}.2""" _lowerCAmelCase :str = convert_resnet(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) _lowerCAmelCase :str = checkpoint['out.0.weight'] _lowerCAmelCase :Union[str, Any] = checkpoint['out.0.bias'] _lowerCAmelCase :List[Any] = 
checkpoint['out.2.weight'] _lowerCAmelCase :Dict = checkpoint['out.2.bias'] return new_checkpoint if __name__ == "__main__": a = argparse.ArgumentParser() parser.add_argument("""--unet_path""", default=None, type=str, required=True, help="""Path to the unet.pt to convert.""") parser.add_argument( """--dump_path""", default=None, type=str, required=True, help="""Path to output the converted UNet model.""" ) parser.add_argument("""--class_cond""", default=True, type=str, help="""Whether the model is class-conditional.""") a = parser.parse_args() a = strabool(args.class_cond) a = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: a = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: a = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: a = None a = con_pt_to_diffuser(args.unet_path, unet_config) a = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: a = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: a = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') a = CMStochasticIterativeScheduler(**scheduler_config) a = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
687
0
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer _lowerCamelCase : List[str] = logging.get_logger(__name__) _lowerCamelCase : Tuple = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all MVP models at https://huggingface.co/models?filter=mvp _lowerCamelCase : int = { "vocab_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json", }, "added_tokens.json": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json", }, "merges_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt", }, "tokenizer_file": { "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json", }, } _lowerCamelCase : Tuple = { "RUCAIBox/mvp": 1_0_2_4, } class __snake_case (snake_case__ ): lowerCAmelCase__ = VOCAB_FILES_NAMES lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase__ = ['input_ids', 'attention_mask'] lowerCAmelCase__ = MvpTokenizer def __init__( self : List[Any] , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : List[str]=None , _UpperCAmelCase : Dict="replace" , _UpperCAmelCase : List[Any]="<s>" , _UpperCAmelCase : Tuple="</s>" , _UpperCAmelCase : Any="</s>" , _UpperCAmelCase : Dict="<s>" , _UpperCAmelCase : Any="<unk>" , _UpperCAmelCase : Tuple="<pad>" , _UpperCAmelCase : Any="<mask>" , _UpperCAmelCase : Optional[int]=False , _UpperCAmelCase : List[Any]=True , **_UpperCAmelCase : Any , ) -> Optional[Any]: '''simple docstring''' super().__init__( _UpperCAmelCase , _UpperCAmelCase , tokenizer_file=_UpperCAmelCase , errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , 
eos_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , trim_offsets=_UpperCAmelCase , **_UpperCAmelCase , ) _lowerCAmelCase : Tuple = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , _UpperCAmelCase ) != add_prefix_space: _lowerCAmelCase : Tuple = getattr(_UpperCAmelCase , pre_tok_state.pop("""type""" ) ) _lowerCAmelCase : int = add_prefix_space _lowerCAmelCase : str = pre_tok_class(**_UpperCAmelCase ) _lowerCAmelCase : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` _lowerCAmelCase : List[str] = 'post_processor' _lowerCAmelCase : Union[str, Any] = getattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase ) if tokenizer_component_instance: _lowerCAmelCase : Dict = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: _lowerCAmelCase : Dict = tuple(state["""sep"""] ) if "cls" in state: _lowerCAmelCase : Tuple = tuple(state["""cls"""] ) _lowerCAmelCase : Tuple = False if state.get("""add_prefix_space""" , _UpperCAmelCase ) != add_prefix_space: _lowerCAmelCase : List[Any] = add_prefix_space _lowerCAmelCase : List[str] = True if state.get("""trim_offsets""" , _UpperCAmelCase ) != trim_offsets: _lowerCAmelCase : str = trim_offsets _lowerCAmelCase : List[Any] = True if changes_to_apply: _lowerCAmelCase : str = getattr(_UpperCAmelCase , state.pop("""type""" ) ) _lowerCAmelCase : Any = component_class(**_UpperCAmelCase ) setattr(self.backend_tokenizer , _UpperCAmelCase , _UpperCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return 
None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : Optional[int] ) -> List[Any]: '''simple docstring''' _lowerCAmelCase : Optional[int] = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else value _lowerCAmelCase : List[Any] = value def SCREAMING_SNAKE_CASE ( self : Optional[int] , *_UpperCAmelCase : Optional[int] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]: '''simple docstring''' _lowerCAmelCase : Tuple = kwargs.get("""is_split_into_words""" , _UpperCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : Any ) -> Tuple: '''simple docstring''' _lowerCAmelCase : str = kwargs.get("""is_split_into_words""" , _UpperCAmelCase ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True " """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_UpperCAmelCase , **_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple: '''simple docstring''' _lowerCAmelCase : Tuple = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase ) return tuple(_UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Any=None ) -> Optional[int]: '''simple docstring''' _lowerCAmelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def 
SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> str: '''simple docstring''' _lowerCAmelCase : List[Any] = [self.sep_token_id] _lowerCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
429
import os import re import shutil import sys import tempfile import unittest import black a = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. a = """ \"\"\" Output class for the scheduler's step function output. Args: prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the denoising loop. pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): The predicted denoised sample (x_{0}) based on the model output from the current timestep. `pred_original_sample` can be used to preview progress or for guidance. \"\"\" prev_sample: torch.FloatTensor pred_original_sample: Optional[torch.FloatTensor] = None """ class UpperCAmelCase_ (unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE__ ( self: Dict ): _lowerCAmelCase :Optional[Any] = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir , 'schedulers/' ) ) _lowerCAmelCase :Tuple = self.diffusers_dir shutil.copy( os.path.join(_UpperCAmelCase , 'src/diffusers/schedulers/scheduling_ddpm.py' ) , os.path.join(self.diffusers_dir , 'schedulers/scheduling_ddpm.py' ) , ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] ): _lowerCAmelCase :str = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def SCREAMING_SNAKE_CASE__ ( self: List[Any] , _UpperCAmelCase: Tuple , _UpperCAmelCase: Optional[Any] , _UpperCAmelCase: Any , _UpperCAmelCase: Tuple=None ): _lowerCAmelCase :int = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code if overwrite_result is not None: _lowerCAmelCase :Dict = comment + f"""\nclass 
{class_name}(nn.Module):\n""" + overwrite_result _lowerCAmelCase :Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 ) _lowerCAmelCase :List[str] = black.format_str(_UpperCAmelCase , mode=_UpperCAmelCase ) _lowerCAmelCase :Union[str, Any] = os.path.join(self.diffusers_dir , 'new_code.py' ) with open(_UpperCAmelCase , 'w' , newline='\n' ) as f: f.write(_UpperCAmelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(_UpperCAmelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name , overwrite=_UpperCAmelCase ) with open(_UpperCAmelCase , 'r' ) as f: self.assertTrue(f.read() , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): _lowerCAmelCase :List[str] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(_UpperCAmelCase , _UpperCAmelCase ) def SCREAMING_SNAKE_CASE__ ( self: Union[str, Any] ): # Base copy consistency self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , REFERENCE_CODE + '\n' , ) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' , 'DDPMSchedulerOutput' , _UpperCAmelCase , ) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , ) # Copy consistency with a really long name _lowerCAmelCase :Optional[int] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub('Bert' , _UpperCAmelCase , _UpperCAmelCase ) , ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from 
diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' , 'TestSchedulerOutput' , _UpperCAmelCase , overwrite_result=re.sub('DDPM' , 'Test' , _UpperCAmelCase ) , )
687
0
"""simple docstring""" import gc import unittest from diffusers import FlaxStableDiffusionInpaintPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class lowerCAmelCase__ ( unittest.TestCase ): """simple docstring""" def snake_case ( self : int ): # clean up the VRAM after each test super().tearDown() gc.collect() def snake_case ( self : Optional[Any] ): __lowercase : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) __lowercase : int = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) __lowercase : List[str] = 'xvjiarui/stable-diffusion-2-inpainting' __lowercase : Tuple = FlaxStableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase ) __lowercase : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench' __lowercase : Any = jax.random.PRNGKey(0 ) __lowercase : Any = 5_0 __lowercase : Optional[int] = jax.device_count() __lowercase : List[Any] = num_samples * [prompt] __lowercase : Dict = num_samples * [init_image] __lowercase : Dict = num_samples * [mask_image] __lowercase : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) # shard inputs and rng __lowercase : Any = replicate(_UpperCAmelCase ) __lowercase : Tuple = jax.random.split(_UpperCAmelCase , jax.device_count() ) __lowercase : Union[str, Any] = shard(_UpperCAmelCase ) __lowercase : List[Any] = shard(_UpperCAmelCase ) __lowercase : List[str] = shard(_UpperCAmelCase ) __lowercase : Optional[int] = pipeline( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , 
jit=_UpperCAmelCase ) __lowercase : Dict = output.images.reshape(_UpperCAmelCase , 5_1_2 , 5_1_2 , 3 ) __lowercase : Optional[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] __lowercase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowercase : Optional[int] = jnp.array( [0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] ) print(f'output_slice: {output_slice}' ) assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
575
"""Argument dataclasses for the CodeParrot training / evaluation / preprocessing scripts."""
from dataclasses import dataclass, field
from typing import Optional


@dataclass
class TrainingArguments:
    """Configuration for training model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate fo training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """Configuration for evaluating model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on HumanEval dataset."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Random seed used for evaluation."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing data."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """Configuration for tokenizer training."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    vocab_size: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    n_examples: Optional[int] = field(
        default=32768, metadata={"help": "Number of examples to train the tokenizer on."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """Configuration for data pretokenization."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})


@dataclass
class InitializationArguments:
    """Configuration for initializing new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
687
0
"""Preprocess the CodeParrot dataset: compute per-file stats, deduplicate, filter, and save compressed JSON shards."""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path

import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser


# Whitespace is stripped before hashing so formatting-only differences dedupe.
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field (whitespace-insensitive MD5)."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Return True the first time a hash is seen; remove it from the set so duplicates return False."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if the file looks auto-generated by scanning its first `scan_width` lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    return {"autogenerated": False}


def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is likely a config or test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: explicit keywords in the first few lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: "config"/"test" occurrences relative to file size
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if the file contains none of the basic Python structure keywords."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function so the datasets cache is filled once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


# NOTE: intentionally shadows the builtin `filter` — passed by name to `ds.filter` below.
def filter(example, uniques, args):
    """Filter dataset with heuristics; config/test/keyword-free files are dropped with probability `filter_proba`."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and delete the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)


# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
338
"""Unit tests for `BarkProcessor` (tokenization plus speaker-embedding handling)."""
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class UpperCAmelCase_(unittest.TestCase):
    """Checks save/load round-trips, voice presets, and tokenizer equivalence."""

    def setUp(self):
        # Shared fixtures; `tmpdirname` is removed in tearDown.
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
687
0
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    Stores encoder/decoder transformer sizes, backbone selection, and the
    Hungarian-matcher / loss coefficients used by the detection head.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map common config attribute names onto the DETR-specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # A custom backbone config and a timm backbone are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a dict-form backbone config into its config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
600
"""BERT model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
    "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
    "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
    "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
    "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
    "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
    "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
    "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
    "bert-large-uncased-whole-word-masking": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking": (
        "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
    ),
    "bert-large-uncased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-large-cased-whole-word-masking-finetuned-squad": (
        "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
    ),
    "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
    "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
    "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
    "cl-tohoku/bert-base-japanese-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
    ),
    "cl-tohoku/bert-base-japanese-char-whole-word-masking": (
        "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-cased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
    ),
    "TurkuNLP/bert-base-finnish-uncased-v1": (
        "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
    ),
    "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
    # See all BERT models at https://huggingface.co/models?filter=bert
}


class BertConfig(PretrainedConfig):
    """Configuration for a BERT model.

    Holds the transformer hyper-parameters (vocabulary, hidden sizes, layer
    counts, dropout probabilities) used to instantiate a BERT model.
    """

    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    """ONNX export configuration for BERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
687
0