Dataset schema (one record = two source files plus style metadata):

    column                    type     range / length
    code                      string   81 - 54k characters
    code_codestyle            int64    0 - 721
    style_context             string   91 - 41.9k characters
    style_context_codestyle   int64    0 - 699
    label                     int64    0 - 1
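The five fields above describe one record of a code-style classification corpus: a source file (`code`), a second file providing style context (`style_context`), an integer style id for each, and a binary `label`. A minimal sketch of inspecting such a dump with the `datasets` library — the repository id below is a placeholder, not the dataset's real name:

```python
from datasets import load_dataset

# Placeholder repository id; substitute the actual dataset path.
ds = load_dataset("your-org/code-style-pairs", split="train")

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the flattened source file
```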
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( _snake_case , unittest.TestCase): """simple docstring""" snake_case__ : Tuple =DiTPipeline snake_case__ : Optional[int] =CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS snake_case__ : Optional[Any] =PipelineTesterMixin.required_optional_params - { '''latents''', '''num_images_per_prompt''', '''callback''', '''callback_steps''', } snake_case__ : Tuple =CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS snake_case__ : Optional[int] =False def a__ ( self: List[Any] )-> Any: torch.manual_seed(0 ) lowerCamelCase : int = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase__ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_000 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCAmelCase__ , ) lowerCamelCase : List[Any] = AutoencoderKL() lowerCamelCase : Optional[Any] = DDIMScheduler() lowerCamelCase : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def a__ ( self: int , __a: Optional[int] , __a: List[Any]=0 )-> Tuple: if str(lowerCAmelCase__ ).startswith("""mps""" ): lowerCamelCase : int = torch.manual_seed(lowerCAmelCase__ ) else: lowerCamelCase : Union[str, Any] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) lowerCamelCase : Optional[int] = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def a__ ( self: Union[str, Any] )-> Any: lowerCamelCase : str = """cpu""" lowerCamelCase : Optional[int] = self.get_dummy_components() lowerCamelCase : str = self.pipeline_class(**lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) lowerCamelCase : Dict = self.get_dummy_inputs(lowerCAmelCase__ ) lowerCamelCase : Dict = pipe(**lowerCAmelCase__ ).images lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) lowerCamelCase : Optional[Any] = np.array([0.29_46, 0.66_01, 0.43_29, 0.32_96, 0.41_44, 0.53_19, 0.72_73, 0.50_13, 0.44_57] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCAmelCase__ , 1e-3 ) def a__ ( self: Optional[int] )-> Union[str, Any]: self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase__ , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def a__ ( self: Any )-> str: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: str )-> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: List[str] )-> Optional[Any]: lowerCamelCase : 
List[str] = torch.manual_seed(0 ) lowerCamelCase : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) lowerCamelCase : Any = ["""vase""", """umbrella""", """white shark""", """white wolf"""] lowerCamelCase : List[str] = pipe.get_label_ids(lowerCAmelCase__ ) lowerCamelCase : List[Any] = pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=40 , output_type="""np""" ).images for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ): lowerCamelCase : Optional[Any] = load_numpy( f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' ) assert np.abs((expected_image - image).max() ) < 1e-2 def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : List[str] = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) lowerCamelCase : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) lowerCamelCase : Optional[Any] = ["""vase""", """umbrella"""] lowerCamelCase : Any = pipe.get_label_ids(lowerCAmelCase__ ) lowerCamelCase : Dict = torch.manual_seed(0 ) lowerCamelCase : str = pipe(lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=25 , output_type="""np""" ).images for word, image in zip(lowerCAmelCase__ , lowerCAmelCase__ ): lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f'/dit/{word}_512.npy' ) assert np.abs((expected_image - image).max() ) < 1e-1
code_codestyle: 701
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase :List[str] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor'] __lowerCamelCase :List[str] = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 42
label: 0
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html __lowerCamelCase :List[str] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class A__ : """simple docstring""" snake_case__ : List[str] =PegasusConfig snake_case__ : Dict ={} snake_case__ : Optional[Any] ="""gelu""" def __init__( self: List[Any] , __a: Optional[int] , __a: str=13 , __a: Dict=7 , __a: Dict=True , __a: List[str]=False , __a: List[Any]=99 , __a: Union[str, Any]=32 , __a: List[str]=5 , __a: str=4 , __a: Any=37 , __a: int=0.1 , __a: Optional[Any]=0.1 , __a: Union[str, Any]=20 , __a: Dict=2 , __a: Optional[Any]=1 , __a: Tuple=0 , )-> Optional[int]: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Dict = batch_size lowerCamelCase : Optional[int] = seq_length lowerCamelCase : Optional[Any] = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : str = num_hidden_layers lowerCamelCase : Optional[Any] = num_attention_heads lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : Tuple = hidden_dropout_prob lowerCamelCase : List[Any] = attention_probs_dropout_prob lowerCamelCase : List[str] = max_position_embeddings lowerCamelCase : int = eos_token_id lowerCamelCase : Any = pad_token_id lowerCamelCase : int = bos_token_id def a__ ( self: int )-> Any: lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) lowerCamelCase : int = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase : List[Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase : Tuple = prepare_pegasus_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return config, inputs_dict def a__ ( self: Optional[int] , __a: Any , __a: int , __a: str )-> str: lowerCamelCase : Optional[int] = 20 lowerCamelCase : Union[str, Any] = model_class_name(UpperCAmelCase_ ) lowerCamelCase : str = model.encode(inputs_dict["""input_ids"""] ) lowerCamelCase , lowerCamelCase : Optional[Any] = ( inputs_dict["""decoder_input_ids"""], 
inputs_dict["""decoder_attention_mask"""], ) lowerCamelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowerCamelCase : Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCamelCase : str = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase : Tuple = model.decode(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def a__ ( self: List[str] , __a: int , __a: Tuple , __a: List[Any] )-> Dict: lowerCamelCase : str = 20 lowerCamelCase : Union[str, Any] = model_class_name(UpperCAmelCase_ ) lowerCamelCase : Optional[int] = model.encode(inputs_dict["""input_ids"""] ) lowerCamelCase , lowerCamelCase : Any = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) lowerCamelCase : Union[str, Any] = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowerCamelCase : Any = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase : Dict = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCamelCase : Optional[Any] = model.decode( decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , ) lowerCamelCase : Union[str, Any] = model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ ) lowerCamelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=None , ) -> Dict: if attention_mask is None: lowerCamelCase : int = np.not_equal(_SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: lowerCamelCase : Optional[int] = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": 
attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class A__ ( snake_case__ , unittest.TestCase): """simple docstring""" snake_case__ : Optional[int] =( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) snake_case__ : Union[str, Any] =(FlaxPegasusForConditionalGeneration,) if is_flax_available() else () snake_case__ : List[str] =True snake_case__ : str =False snake_case__ : Optional[int] =False snake_case__ : List[str] =False def a__ ( self: Tuple )-> int: lowerCamelCase : List[Any] = FlaxPegasusModelTester(self ) lowerCamelCase : str = ConfigTester(self , config_class=UpperCAmelCase_ ) def a__ ( self: Union[str, Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: Tuple )-> Dict: lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase , lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) def a__ ( self: Any )-> List[Any]: lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase : Dict = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase : Union[str, Any] = model_class(UpperCAmelCase_ ) @jax.jit def encode_jitted(__a: Optional[int] , __a: Optional[Any]=None , **__a: Optional[Any] ): return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) with self.subTest("""JIT Enabled""" ): lowerCamelCase : Dict = encode_jitted(**UpperCAmelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCamelCase : Tuple = encode_jitted(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertEqual(jitted_output.shape , output.shape ) def a__ ( self: List[Any] )-> Any: lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCamelCase : Union[str, Any] = model_class(UpperCAmelCase_ ) lowerCamelCase : Dict = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) lowerCamelCase : List[Any] = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(__a: Optional[int] , __a: Dict , __a: List[Any] ): return model.decode( decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , ) with self.subTest("""JIT Enabled""" ): lowerCamelCase : Optional[int] = decode_jitted(**UpperCAmelCase_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): lowerCamelCase : Optional[Any] = decode_jitted(**UpperCAmelCase_ ).to_tuple() self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_ ): self.assertEqual(jitted_output.shape , 
output.shape ) @slow def a__ ( self: Any )-> List[str]: for model_class_name in self.all_model_classes: lowerCamelCase : int = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCAmelCase_ ) lowerCamelCase : List[Any] = np.ones((1, 1) ) lowerCamelCase : Any = model(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @slow def a__ ( self: Dict )-> Tuple: lowerCamelCase : Optional[Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) lowerCamelCase : Optional[int] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) lowerCamelCase : str = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """, ] lowerCamelCase : Optional[int] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] lowerCamelCase : str = tokenizer(UpperCAmelCase_ , return_tensors="""np""" , truncation=UpperCAmelCase_ , max_length=512 , padding=UpperCAmelCase_ ) lowerCamelCase : Union[str, Any] = model.generate(**UpperCAmelCase_ , num_beams=2 ).sequences lowerCamelCase : str = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ ) assert tgt_text == decoded
code_codestyle: 702
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict: lowerCamelCase : Dict = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = image_size lowerCamelCase : Optional[int] = patch_size lowerCamelCase : Any = num_channels lowerCamelCase : Any = embed_dim lowerCamelCase : Dict = hidden_sizes lowerCamelCase : List[Any] = depths lowerCamelCase : Tuple = num_heads lowerCamelCase : List[Any] = window_size lowerCamelCase : str = mlp_ratio lowerCamelCase : str = qkv_bias lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Tuple = drop_path_rate lowerCamelCase : Dict = hidden_act lowerCamelCase : Tuple = use_absolute_embeddings lowerCamelCase : List[str] = patch_norm lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : str = initializer_range lowerCamelCase : Tuple = is_training lowerCamelCase : int = scope lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : List[str] = type_sequence_label_size lowerCamelCase : str = encoder_stride lowerCamelCase : List[str] = out_features lowerCamelCase : Optional[int] = out_indices def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : str = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = self.get_config() return config, pixel_values, labels def a__ ( self: List[Any] )-> Optional[int]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Tuple = model(__a ) lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int: lowerCamelCase : List[Any] = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase : Dict = None lowerCamelCase : Dict = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a ) model.to(__a ) model.eval() lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str: lowerCamelCase : Optional[Any] = self.type_sequence_label_size lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : int = 1 lowerCamelCase : List[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self: int )-> Optional[int]: 
lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) snake_case__ : Optional[int] =( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) snake_case__ : Tuple =False snake_case__ : Dict =False snake_case__ : Dict =False snake_case__ : Tuple =False snake_case__ : Optional[int] =False def a__ ( self: Union[str, Any] )-> Optional[int]: lowerCamelCase : List[str] = FocalNetModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a ) def a__ ( self: List[str] )-> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: List[str] )-> Union[str, Any]: return def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[Any] )-> Dict: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: List[Any] )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def a__ ( self: Optional[Any] )-> str: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def a__ ( self: Optional[Any] )-> Dict: pass def a__ ( self: Optional[Any] )-> Dict: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : Any = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : int = model_class(__a ) lowerCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Any = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]: lowerCamelCase : List[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase 
: List[str] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : List[str] = outputs.hidden_states lowerCamelCase : Tuple = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # FocalNet has a different seq_length lowerCamelCase : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape lowerCamelCase : Tuple = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self: Any )-> Any: lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase : List[str] = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : List[Any] = True self.check_hidden_states_output(__a , __a , __a , __a ) def a__ ( self: str )-> Union[str, Any]: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = 3 lowerCamelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase : str = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Union[str, Any] = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) @slow def a__ ( self: Optional[int] )-> List[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> Any: lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = _config_zero_init(__a ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems 
not properly initialized' , ) @require_vision @require_torch class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Optional[int] )-> Optional[Any]: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else () snake_case__ : Optional[int] =FocalNetConfig snake_case__ : str =False def a__ ( self: Union[str, Any] )-> Tuple: lowerCamelCase : str = FocalNetModelTester(self )
style_context_codestyle: 42
label: 0
"""simple docstring""" import argparse import os from pathlib import Path from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params __lowerCamelCase :List[str] = [ # replace left string with right string to get the relevant state_dict key (identical state dict to bart) ["memory_attention", "encoder_attn"], ["attention", "attn"], ["/", "."], [".LayerNorm.gamma", "_layer_norm.weight"], [".LayerNorm.beta", "_layer_norm.bias"], ["r.layer_", "r.layers."], ["output_proj", "out_proj"], ["ffn.dense_1.", "fc2."], ["ffn.dense.", "fc1."], ["ffn_layer_norm", "final_layer_norm"], ["kernel", "weight"], ["encoder_layer_norm.", "encoder.layer_norm."], ["decoder_layer_norm.", "decoder.layer_norm."], ["embeddings.weights", "shared.weight"], ] def snake_case ( UpperCamelCase__ : int ) -> List[str]: '''simple docstring''' for pegasus_name, hf_name in PATTERNS: lowerCamelCase : List[Any] = k.replace(UpperCamelCase__ , UpperCamelCase__ ) return k def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ) -> PegasusForConditionalGeneration: '''simple docstring''' lowerCamelCase : Union[str, Any] = DEFAULTS.copy() cfg_kwargs.update(UpperCamelCase__ ) lowerCamelCase : List[Any] = PegasusConfig(**UpperCamelCase__ ) lowerCamelCase : Optional[Any] = PegasusForConditionalGeneration(UpperCamelCase__ ) lowerCamelCase : str = torch_model.model.state_dict() lowerCamelCase : Any = {} for k, v in tf_weights.items(): lowerCamelCase : Optional[Any] = rename_state_dict_key(UpperCamelCase__ ) if new_k not in sd: raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if "dense" in k or "proj" in new_k: lowerCamelCase : Dict = v.T lowerCamelCase : Optional[Any] = torch.tensor(UpperCamelCase__ , dtype=sd[new_k].dtype ) assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}' # make sure embedding.padding_idx is respected lowerCamelCase : int = torch.zeros_like(mapping["""shared.weight"""][cfg.pad_token_id + 1] ) lowerCamelCase : Tuple = mapping["""shared.weight"""] lowerCamelCase : List[str] = mapping["""shared.weight"""] lowerCamelCase : str = {k: torch.zeros_like(UpperCamelCase__ ) for k, v in sd.items() if k.endswith("""bias""" ) and k not in mapping} mapping.update(**UpperCamelCase__ ) lowerCamelCase , lowerCamelCase : Tuple = torch_model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) lowerCamelCase : List[str] = [ k for k in missing if k not in ["""encoder.embed_positions.weight""", """decoder.embed_positions.weight"""] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def snake_case ( UpperCamelCase__ : Optional[int]="./ckpt/aeslc/model.ckpt-32000" ) -> Dict: '''simple docstring''' lowerCamelCase : int = tf.train.list_variables(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = {} lowerCamelCase : Dict = ["""Adafactor""", """global_step"""] for name, shape in tqdm(UpperCamelCase__ , desc="""converting tf checkpoint to dict""" ): lowerCamelCase : Optional[int] = any(pat in name for pat in ignore_name ) if skip_key: continue lowerCamelCase : Any = tf.train.load_variable(UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = array return tf_weights def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase : Any = Path(UpperCamelCase__ ).parent.name lowerCamelCase : List[Any] = task_specific_params[F'summarization_{dataset}']["""max_position_embeddings"""] lowerCamelCase : List[Any] = PegasusTokenizer.from_pretrained("""sshleifer/pegasus""" , model_max_length=UpperCamelCase__ ) assert tok.model_max_length == desired_max_model_length tok.save_pretrained(UpperCamelCase__ ) # convert model lowerCamelCase : Tuple = get_tf_weights_as_numpy(UpperCamelCase__ ) lowerCamelCase : Dict = task_specific_params[F'summarization_{dataset}'] if dataset == "large": lowerCamelCase : Any = task_specific_params lowerCamelCase : Optional[int] = convert_pegasus(UpperCamelCase__ , UpperCamelCase__ ) torch_model.save_pretrained(UpperCamelCase__ ) lowerCamelCase : int = torch_model.state_dict() sd.pop("""model.decoder.embed_positions.weight""" ) sd.pop("""model.encoder.embed_positions.weight""" ) torch.save(UpperCamelCase__ , Path(UpperCamelCase__ ) / """pytorch_model.bin""" ) if __name__ == "__main__": __lowerCamelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.') __lowerCamelCase :Tuple = parser.parse_args() if args.save_dir is None: __lowerCamelCase :Optional[int] = Path(args.tf_ckpt_path).parent.name __lowerCamelCase :Union[str, Any] = os.path.join('pegasus', dataset) convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
code_codestyle: 703
"""simple docstring""" import os def snake_case ( ) -> Optional[Any]: with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f: lowerCamelCase : int = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase__ ) for x in f.readline().split()] ) lowerCamelCase : Union[str, Any] = 0 # right for i in range(20 ): for j in range(17 ): lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowerCamelCase : Tuple = temp # down for i in range(17 ): for j in range(20 ): lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowerCamelCase : Optional[Any] = temp # diagonal 1 for i in range(17 ): for j in range(17 ): lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowerCamelCase : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowerCamelCase : List[Any] = temp return maximum if __name__ == "__main__": print(solution())
style_context_codestyle: 42
label: 0
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class A__ : """simple docstring""" snake_case__ : Tuple =BlenderbotSmallConfig snake_case__ : List[Any] ={} snake_case__ : Union[str, Any] ='''gelu''' def __init__( self: Optional[Any] , __a: int , __a: Optional[Any]=13 , __a: Union[str, Any]=7 , __a: Any=True , __a: Optional[int]=False , __a: str=99 , __a: Optional[int]=32 , __a: Optional[Any]=2 , __a: str=4 , __a: Union[str, Any]=37 , __a: List[Any]=0.1 , __a: Any=0.1 , __a: Dict=20 , __a: Optional[Any]=2 , __a: Any=1 , __a: Optional[int]=0 , )-> str: lowerCamelCase : Tuple = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = seq_length lowerCamelCase : Dict = is_training lowerCamelCase : List[str] = use_labels lowerCamelCase : int = vocab_size lowerCamelCase : Any = hidden_size lowerCamelCase : Union[str, Any] = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Any = intermediate_size lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Tuple = attention_probs_dropout_prob lowerCamelCase : Optional[Any] = max_position_embeddings lowerCamelCase : str = eos_token_id lowerCamelCase : List[str] = pad_token_id lowerCamelCase : str = bos_token_id def a__ ( self: int )-> List[Any]: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase : Dict = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase : int = prepare_blenderbot_small_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return config, inputs_dict def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[Any] )-> List[str]: lowerCamelCase : List[Any] = TFBlenderbotSmallModel(config=UpperCamelCase_ ).get_decoder() lowerCamelCase : str = inputs_dict['input_ids'] lowerCamelCase : Union[str, Any] = input_ids[:1, :] lowerCamelCase : List[str] = inputs_dict['attention_mask'][:1, :] lowerCamelCase : Optional[Any] = inputs_dict['head_mask'] lowerCamelCase : Optional[int] = 1 # first forward pass lowerCamelCase : Any = model(UpperCamelCase_ , 
attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ ) lowerCamelCase : Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowerCamelCase : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase : str = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCamelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0] lowerCamelCase : Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1e-3 ) def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : str=None , ) -> Tuple: if attention_mask is None: lowerCamelCase : Any = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase : Optional[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase : str = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase : Optional[int] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[int] =( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) snake_case__ : List[str] =(TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () snake_case__ : Dict =( { '''conversational''': TFBlenderbotSmallForConditionalGeneration, '''feature-extraction''': TFBlenderbotSmallModel, '''summarization''': TFBlenderbotSmallForConditionalGeneration, '''text2text-generation''': TFBlenderbotSmallForConditionalGeneration, '''translation''': TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) snake_case__ : int =True snake_case__ : Optional[Any] =False snake_case__ : Optional[Any] =False def a__ ( self: int )-> List[str]: lowerCamelCase : Union[str, Any] = TFBlenderbotSmallModelTester(self ) lowerCamelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ ) def a__ ( self: Optional[int] )-> Dict: self.config_tester.run_common_tests() def a__ ( self: 
Optional[int] )-> Union[str, Any]: lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ ) @require_tokenizers @require_tf class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : Optional[int] =[ '''Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like ''' ''' i\'m going to throw up.\nand why is that?''' ] snake_case__ : str ='''facebook/blenderbot_small-90M''' @cached_property def a__ ( self: Optional[int] )-> Any: return BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" ) @cached_property def a__ ( self: Union[str, Any] )-> List[str]: lowerCamelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def a__ ( self: Union[str, Any] )-> List[Any]: lowerCamelCase : Optional[Any] = self.tokenizer(self.src_text , return_tensors="""tf""" ) lowerCamelCase : List[Any] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase_ , ) lowerCamelCase : Optional[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
code_codestyle: 704
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Any = False @skip_mps class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline snake_case__ : Any =False snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Dict )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Union[str, Any] )-> Any: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: Tuple )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : Optional[int] = CLIPTextModel(__a ) lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]: if str(__a ).startswith("""mps""" ): lowerCamelCase : Tuple = torch.manual_seed(__a ) else: lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : Dict = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: Dict )-> str: lowerCamelCase : Tuple = """cpu""" lowerCamelCase : List[str] = self.get_dummy_components() lowerCamelCase : List[Any] = self.pipeline_class(**__a 
) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Any = self.get_dummy_inputs(__a ) lowerCamelCase : Union[str, Any] = pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def a__ ( self: int )-> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Union[str, Any] )-> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> int: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: Dict )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: Optional[int] )-> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Tuple: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: str )-> str: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: Any )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Dict )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: int )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = torch.manual_seed(51 ) lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : Dict = """a painting of an elephant with glasses""" lowerCamelCase : Any = [5, 7] lowerCamelCase : Tuple = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
42
0
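For orientation, the slow test above already exercises the whole public API; a minimal standalone sketch (model id, prompt, token indices, and seed taken from that test; the step count here is illustrative, not what the test uses):

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
)
pipe.to("cuda")

# token_indices selects the prompt tokens whose attention gets "excited"
image = pipe(
    prompt="a painting of an elephant with glasses",
    token_indices=[5, 7],
    guidance_scale=7.5,
    generator=torch.manual_seed(51),
    num_inference_steps=50,
).images[0]
image.save("elephant_glasses.png")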
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor __lowerCamelCase :str = logging.get_logger(__name__) class A__ ( lowercase__): """simple docstring""" def __init__( self: int , *__a: Union[str, Any] , **__a: List[Any] )-> List[Any]: warnings.warn( """The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use VideoMAEImageProcessor instead.""" , __lowerCamelCase , ) super().__init__(*__lowerCamelCase , **__lowerCamelCase )
705
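The shim's only observable behavior is the deprecation warning plus full VideoMAEImageProcessor compatibility; a quick hedged check (assumes a transformers install that still exports the class):

import warnings

from transformers import VideoMAEFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = VideoMAEFeatureExtractor()  # behaves exactly like VideoMAEImageProcessor

assert any(issubclass(w.category, FutureWarning) for w in caught)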
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A__ : """simple docstring""" def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : Any = seq_length lowerCamelCase : Any = is_training lowerCamelCase : Tuple = use_input_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : List[str] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Union[str, Any] = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : Any = max_position_embeddings lowerCamelCase : str = type_vocab_size lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : Union[str, Any] = num_labels lowerCamelCase : Optional[Any] = num_choices lowerCamelCase : Any = scope def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Dict = None if self.use_input_mask: lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Any = None lowerCamelCase : int = None lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self: Tuple )-> Union[str, Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , 
__a: str )-> int: lowerCamelCase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a ) lowerCamelCase : str = model(__a ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int: lowerCamelCase : str = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]: lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Dict = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Tuple = config_and_inputs lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Any =False snake_case__ : Dict =( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Dict =() snake_case__ : Optional[int] =( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Any =True def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Optional[Any] = EsmModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 ) def a__ ( self: List[Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: Tuple )-> Any: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase : Tuple = type self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def a__ ( self: Any )-> List[Any]: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> List[str]: lowerCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a ) lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase : Union[str, Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Any = EsmEmbeddings(config=__a ) lowerCamelCase : Dict = torch.empty(2 , 4 , 30 ) lowerCamelCase : List[Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Any )-> Optional[Any]: pass @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Dict )-> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: List[str] )-> Dict: pass @require_torch class A__ ( __lowercase): """simple docstring""" @slow def a__ ( self: Any )-> Union[str, Any]: with torch.no_grad(): lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Tuple = model(__a )[0] lowerCamelCase : Dict = 33 lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) lowerCamelCase : Tuple = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Dict )-> str: with torch.no_grad(): lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Any = model(__a )[0] # compare the actual values for a slice. lowerCamelCase : Tuple = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
42
0
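The two position-id tests above pin down a simple convention: real tokens are numbered consecutively starting at padding_idx + 1, and pad slots keep padding_idx itself. A self-contained re-implementation of that convention (a sketch matching the expected tensors in the tests, not the library function itself):

import torch

def create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    # non-pad tokens get consecutive positions starting at padding_idx + 1;
    # pad tokens keep padding_idx, matching the expectations in the tests above
    mask = input_ids.ne(padding_idx).int()
    incremental = torch.cumsum(mask, dim=1) * mask
    return incremental.long() + padding_idx

ids = torch.tensor([[12, 31, 13, 1]])  # 1 == padding_idx
print(create_position_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])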
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __lowerCamelCase :Tuple = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def snake_case ( UpperCamelCase__ : List[Any] ) -> int: lowerCamelCase : Union[str, Any] = {} state_dict.pop("""pixel_mean""" , lowerCamelCase_ ) state_dict.pop("""pixel_std""" , lowerCamelCase_ ) lowerCamelCase : str = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCamelCase : int = key.replace(lowerCamelCase_ , lowerCamelCase_ ) if re.match(lowerCamelCase_ , lowerCamelCase_ ): lowerCamelCase : int = int(re.match(lowerCamelCase_ , lowerCamelCase_ ).group(2 ) ) if layer_nb == 0: lowerCamelCase : Optional[Any] = key.replace("""layers.0""" , """proj_in""" ) elif layer_nb == 1: lowerCamelCase : List[str] = key.replace("""layers.1""" , """layers.0""" ) elif layer_nb == 2: lowerCamelCase : List[Any] = key.replace("""layers.2""" , """proj_out""" ) lowerCamelCase : Any = value lowerCamelCase : int = model_state_dict[ 'prompt_encoder.shared_embedding.positional_embedding' ] return model_state_dict def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int="ybelkada/segment-anything" ) -> Any: lowerCamelCase : Dict = hf_hub_download(lowerCamelCase_ , F'checkpoints/{model_name}.pth' ) if "sam_vit_b" in model_name: lowerCamelCase : Tuple = SamConfig() elif "sam_vit_l" in model_name: lowerCamelCase : Union[str, Any] = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCamelCase : Tuple = SamConfig( vision_config=lowerCamelCase_ , ) elif "sam_vit_h" in model_name: lowerCamelCase : Optional[Any] = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCamelCase : str = SamConfig( vision_config=lowerCamelCase_ , ) lowerCamelCase : Optional[int] = torch.load(lowerCamelCase_ , map_location="""cpu""" ) lowerCamelCase : Union[str, Any] = replace_keys(lowerCamelCase_ ) lowerCamelCase : int = SamImageProcessor() lowerCamelCase : Dict = SamProcessor(image_processor=lowerCamelCase_ ) lowerCamelCase : Union[str, Any] = SamModel(lowerCamelCase_ ) hf_model.load_state_dict(lowerCamelCase_ ) lowerCamelCase : Dict = 
hf_model.to("""cuda""" ) lowerCamelCase : Dict = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png' lowerCamelCase : Any = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("""RGB""" ) lowerCamelCase : Dict = [[[400, 650]]] lowerCamelCase : List[str] = [[1]] lowerCamelCase : Any = processor(images=np.array(lowerCamelCase_ ) , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase : Tuple = hf_model(**lowerCamelCase_ ) lowerCamelCase : str = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.5_7_9_8_9_0_2_5_1_1_5_9_6_6_8 lowerCamelCase : Union[str, Any] = processor( images=np.array(lowerCamelCase_ ) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase : List[str] = hf_model(**lowerCamelCase_ ) lowerCamelCase : List[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_7_1_2_6_0_3_0_9_2_1_9_3_6_0_4 lowerCamelCase : Tuple = ((75, 275, 1725, 850),) lowerCamelCase : str = processor(images=np.array(lowerCamelCase_ ) , input_boxes=lowerCamelCase_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase : Tuple = hf_model(**lowerCamelCase_ ) lowerCamelCase : Union[str, Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_6_8_6_0_1_5_6_0_5_9_2_6_5_1_4 # Test with 2 points and 1 image. lowerCamelCase : str = [[[400, 650], [800, 650]]] lowerCamelCase : Optional[int] = [[1, 1]] lowerCamelCase : Any = processor( images=np.array(lowerCamelCase_ ) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): lowerCamelCase : List[str] = hf_model(**lowerCamelCase_ ) lowerCamelCase : Optional[int] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_9_3_6_0_4_7_7_9_2_4_3_4_6_9_2 if __name__ == "__main__": __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser() __lowerCamelCase :Dict = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to hf config.json of model to convert', ) __lowerCamelCase :Union[str, Any] = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
706
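The renaming logic in the conversion script combines a literal substring map with one regex for the hypernetwork MLPs; the same pattern in isolation, on toy keys (mapping entries and regex copied from the script above):

import re

KEYS_TO_MODIFY = {"image_encoder": "vision_encoder", "patch_embed.proj": "patch_embed.projection"}
LAYER_RE = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

def rename_key(key: str) -> str:
    # first apply the literal substring renames
    for old, new in KEYS_TO_MODIFY.items():
        if old in key:
            key = key.replace(old, new)
    # then remap the numbered MLP layers onto proj_in / layers.0 / proj_out
    m = re.match(LAYER_RE, key)
    if m:
        layer_nb = int(m.group(2))
        if layer_nb == 0:
            key = key.replace("layers.0", "proj_in")
        elif layer_nb == 1:
            key = key.replace("layers.1", "layers.0")
        elif layer_nb == 2:
            key = key.replace("layers.2", "proj_out")
    return key

print(rename_key("image_encoder.patch_embed.proj.weight"))
# vision_encoder.patch_embed.projection.weight
print(rename_key("mask_decoder.output_hypernetworks_mlps.0.layers.2.weight"))
# mask_decoder.output_hypernetworks_mlps.0.proj_out.weight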
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :str = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =AlbertTokenizer snake_case__ : Optional[Any] =AlbertTokenizerFast snake_case__ : Optional[int] =True snake_case__ : Any =True snake_case__ : Optional[int] =True def a__ ( self: Dict )-> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : int = AlbertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]: lowerCamelCase : List[str] = """this is a test""" lowerCamelCase : int = """this is a test""" return input_text, output_text def a__ ( self: Any )-> List[Any]: lowerCamelCase : int = """<pad>""" lowerCamelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def a__ ( self: Tuple )-> str: lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(__a ) , 30_000 ) def a__ ( self: List[str] )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def a__ ( self: Optional[Any] )-> Union[str, Any]: if not self.test_rust_tokenizer: return lowerCamelCase : str = self.get_tokenizer() lowerCamelCase : Tuple = self.get_rust_tokenizer() lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : List[str] = tokenizer.tokenize(__a ) lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Any = self.get_rust_tokenizer() lowerCamelCase : List[str] = tokenizer.encode(__a ) lowerCamelCase : str = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def a__ ( self: Tuple )-> List[Any]: lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a ) lowerCamelCase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] ) lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def a__ ( self: Tuple )-> str: lowerCamelCase : str = AlbertTokenizer(__a ) lowerCamelCase : Union[str, Any] = 
tokenizer.encode("""sequence builders""" ) lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" ) lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def a__ ( self: Any )-> Dict: # fmt: off lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
42
0
import os import numpy import onnx def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[str]: lowerCamelCase : Optional[Any] = a.name lowerCamelCase : List[str] = b.name lowerCamelCase : int = '' lowerCamelCase : Dict = '' lowerCamelCase : Optional[Any] = a == b lowerCamelCase : Optional[Any] = name_a lowerCamelCase : Optional[Any] = name_b return res def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> Union[str, Any]: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCamelCase__ , UpperCamelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCamelCase__ , UpperCamelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCamelCase__ , UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ) -> Tuple: for n in graph_proto.node: _node_replace_input_with(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> List[Any]: lowerCamelCase : Tuple = list(model.graph.initializer ) lowerCamelCase : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i lowerCamelCase : List[str] = inits[i].name lowerCamelCase : Dict = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCamelCase__ , UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : List[Any] ) -> str: lowerCamelCase : List[Any] = os.path.dirname(UpperCamelCase__ ) lowerCamelCase : List[str] = os.path.basename(UpperCamelCase__ ) lowerCamelCase : Dict = onnx.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) lowerCamelCase : Optional[int] = list(model.graph.initializer ) lowerCamelCase : Tuple = set() lowerCamelCase : int = {} lowerCamelCase : Optional[Any] = [] lowerCamelCase : Optional[Any] = 0 for i in range(len(UpperCamelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCamelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCamelCase__ ) dup_set.add(UpperCamelCase__ ) lowerCamelCase : Any = inits[j].data_type lowerCamelCase : Dict = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print("""unexpected data type: """ , UpperCamelCase__ ) total_reduced_size += mem_size lowerCamelCase : List[str] = inits[i].name lowerCamelCase : List[Any] = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCamelCase__ ) else: lowerCamelCase : str = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1024 / 1024 / 1024 , """GB""" ) lowerCamelCase : Optional[Any] = sorted(UpperCamelCase__ ) _remove_dup_initializers_from_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Dict = 'optimized_' + model_file_name lowerCamelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) onnx.save(UpperCamelCase__ , UpperCamelCase__ ) return new_model
707
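The deduplication above hinges on comparing TensorProtos while ignoring their names (the first helper temporarily blanks the name fields before comparing). That trick in isolation, with hypothetical toy tensors:

import numpy as np
import onnx
from onnx import numpy_helper

def tensors_equal_ignoring_name(a: onnx.TensorProto, b: onnx.TensorProto) -> bool:
    # blank the names, compare the protos, then restore the names
    name_a, name_b = a.name, b.name
    a.name = ""
    b.name = ""
    result = a == b
    a.name = name_a
    b.name = name_b
    return result

t1 = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w1")
t2 = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w2")
print(tensors_equal_ignoring_name(t1, t2))  # True: same payload, different names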
"""simple docstring""" __lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : Tuple = True lowerCamelCase : Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) order.append(UpperCamelCase__ ) return order def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : List[Any] = True lowerCamelCase : int = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return component def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]: lowerCamelCase : int = len(UpperCamelCase__ ) * [False] lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(UpperCamelCase__ ) lowerCamelCase : int = [] for i, was_visited in enumerate(UpperCamelCase__ ): if not was_visited: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Tuple = [] lowerCamelCase : str = len(UpperCamelCase__ ) * [False] for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1] if not visited[vert]: lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) components_list.append(UpperCamelCase__ ) return components_list
42
0
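Running the component finder on the module's own test graphs (ordering inside and between components depends on DFS traversal order, so treat the exact output as indicative):

print(strongly_connected_components(test_graph_1))  # e.g. [[0, 1, 2], [3], [4]]
print(strongly_connected_components(test_graph_2))  # e.g. [[0, 2, 1], [3, 5, 4]]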
"""simple docstring""" import datasets from .evaluate import evaluate __lowerCamelCase :int = '\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n' __lowerCamelCase :Optional[Any] = '\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n' __lowerCamelCase :str = '\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric("squad")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class A__ ( datasets.Metric): """simple docstring""" def a__ ( self: str )-> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": {"""id""": datasets.Value("""string""" ), """prediction_text""": datasets.Value("""string""" )}, """references""": { """id""": datasets.Value("""string""" ), """answers""": datasets.features.Sequence( { """text""": datasets.Value("""string""" ), """answer_start""": datasets.Value("""int32""" ), } ), }, } ) , codebase_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , reference_urls=["""https://rajpurkar.github.io/SQuAD-explorer/"""] , ) def a__ ( self: Dict , __a: Optional[Any] , __a: List[str] )-> Tuple: lowerCamelCase : str = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions} lowerCamelCase : int = [ { '''paragraphs''': [ { '''qas''': [ { '''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']], '''id''': ref['''id'''], } for ref in references ] } ] } ] lowerCamelCase : Union[str, Any] = evaluate(dataset=__a , predictions=__a ) return score
708
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :str = logging.get_logger(__name__) __lowerCamelCase :Any = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( __lowercase): """simple docstring""" snake_case__ : List[Any] ='''time_series_transformer''' snake_case__ : List[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any: # time series specific configuration lowerCamelCase : str = prediction_length lowerCamelCase : Optional[Any] = context_length or prediction_length lowerCamelCase : Tuple = distribution_output lowerCamelCase : Any = loss lowerCamelCase : List[Any] = input_size lowerCamelCase : int = num_time_features lowerCamelCase : Dict = lags_sequence lowerCamelCase : Optional[int] = scaling lowerCamelCase : int = num_dynamic_real_features lowerCamelCase : Tuple = num_static_real_features lowerCamelCase : Any = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : int = cardinality else: lowerCamelCase : Dict = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : str = embedding_dimension else: lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase : Any = num_parallel_samples # Transformer architecture configuration lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features lowerCamelCase : List[str] = d_model lowerCamelCase : Tuple = encoder_attention_heads lowerCamelCase : Optional[int] = decoder_attention_heads lowerCamelCase : Union[str, Any] = encoder_ffn_dim lowerCamelCase : str = decoder_ffn_dim lowerCamelCase : str = encoder_layers lowerCamelCase : Any = decoder_layers lowerCamelCase : Optional[int] = dropout lowerCamelCase : List[str] = attention_dropout lowerCamelCase : Tuple = activation_dropout lowerCamelCase : Optional[int] = encoder_layerdrop lowerCamelCase : int = decoder_layerdrop lowerCamelCase : Optional[int] = activation_function lowerCamelCase : Optional[Any] = init_std lowerCamelCase : Optional[Any] = use_cache super().__init__(is_encoder_decoder=__a , **__a ) @property def a__ ( self: int )-> int: return 
( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
42
0
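The cardinality/embedding_dimension handling above is the easiest part to get wrong when configuring static categorical features; a small hedged example of the defaulting rule (values are illustrative):

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,
    num_static_categorical_features=1,
    cardinality=[366],
)
# embedding_dimension defaults to min(50, (cardinality + 1) // 2) per categorical feature
print(config.embedding_dimension)  # [50]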
"""simple docstring""" import cva import numpy as np class A__ : """simple docstring""" def __init__( self: List[Any] , __a: List[Any] , __a: Any )-> Tuple: if k in (0.04, 0.06): lowerCamelCase : Dict = k lowerCamelCase : str = window_size else: raise ValueError("""invalid k value""" ) def __str__( self: Optional[int] )-> Tuple: return str(self.k ) def a__ ( self: List[Any] , __a: Dict )-> Union[str, Any]: lowerCamelCase : Tuple = cva.imread(_lowerCAmelCase , 0 ) lowerCamelCase , lowerCamelCase : str = img.shape lowerCamelCase : List[str] = [] lowerCamelCase : Any = img.copy() lowerCamelCase : Tuple = cva.cvtColor(_lowerCAmelCase , cva.COLOR_GRAY2RGB ) lowerCamelCase , lowerCamelCase : int = np.gradient(_lowerCAmelCase ) lowerCamelCase : Optional[int] = dx**2 lowerCamelCase : Tuple = dy**2 lowerCamelCase : Dict = dx * dy lowerCamelCase : Optional[int] = 0.04 lowerCamelCase : str = self.window_size // 2 for y in range(_lowerCAmelCase , h - offset ): for x in range(_lowerCAmelCase , w - offset ): lowerCamelCase : List[Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase : Union[str, Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase : List[Any] = (wxx * wyy) - (wxy**2) lowerCamelCase : Tuple = wxx + wyy lowerCamelCase : Dict = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": __lowerCamelCase :Optional[Any] = HarrisCorner(0.04, 3) __lowerCamelCase :Tuple = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
709
"""simple docstring""" from __future__ import annotations __lowerCamelCase :int = 10 def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]: lowerCamelCase : int = 1 lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ ) while placement <= max_digit: # declare and initialize empty buckets lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCamelCase : Any = int((i / placement) % RADIX ) buckets[tmp].append(UpperCamelCase__ ) # put each buckets' contents into list_of_ints lowerCamelCase : Dict = 0 for b in range(UpperCamelCase__ ): for i in buckets[b]: lowerCamelCase : List[str] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
42
0
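A usage note on the sort above: it assumes non-negative integers and a non-empty input (max() on an empty list raises). Typical call:

print(radix_sort([0, 5, 3, 2, 2]))  # [0, 2, 2, 3, 5]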
"""simple docstring""" def snake_case ( UpperCamelCase__ : Union[str, Any] = 10**12 ) -> int: lowerCamelCase : Any = 1 lowerCamelCase : int = 0 lowerCamelCase : str = 1 lowerCamelCase : Union[str, Any] = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"""{solution() = }""")
710
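A sketch of why the paired updates solve the problem (assuming the usual Project Euler 100 reading of this file): with blue count b out of n discs, substituting x = 2n - 1 and y = 2b - 1 turns the condition into a negative Pell equation,

\[
\frac{b(b-1)}{n(n-1)} = \frac{1}{2}
\;\Longleftrightarrow\;
x^2 - 2y^2 = -1,
\qquad x = 2n - 1,\; y = 2b - 1,
\]

whose solutions advance by \(x_{k+1} = 3x_k + 4y_k\), \(y_{k+1} = 2x_k + 3y_k\). Each pass through the while loop performs one such step (as two interleaved convergent updates for \(\sqrt{2}\)), and the final (denominator + 1) // 2 maps \(y\) back to the disc count \(b\).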
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Dict = np.asarray(weights[0] ) lowerCamelCase : List[Any] = np.asarray(weights[1] ) lowerCamelCase : List[str] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Tuple = np.asarray(weights[0] ) lowerCamelCase : Any = np.asarray(weights[1] ) lowerCamelCase : List[Any] = np.asarray(weights[2] ) lowerCamelCase : List[str] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]: # layernorm 1 lowerCamelCase : str = weights[0][0][0] lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] ) lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # lsh weights + output lowerCamelCase : List[Any] = weights[0][1] if len(UpperCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) else: set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) # intermediate weighs lowerCamelCase : int = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCamelCase__ ) == 4: lowerCamelCase : Dict = intermediate_weights[2] # layernorm 2 lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] ) lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # intermediate dense lowerCamelCase : 
Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) # intermediate out lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] ) lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]: # reformer model lowerCamelCase : List[Any] = torch_model.reformer # word embeds lowerCamelCase : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , ) if isinstance(weights[3] , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) ) lowerCamelCase : int = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # output layer norm lowerCamelCase : Any = np.asarray(weights[7][0] ) lowerCamelCase : List[str] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # output embeddings lowerCamelCase : List[Any] = np.asarray(weights[9][0] ) lowerCamelCase : Optional[int] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: # Initialise PyTorch model lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ ) with open(UpperCamelCase__ , """rb""" ) as f: lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""] set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCamelCase :Optional[int] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
42
0
"""simple docstring""" import copy from collections import OrderedDict from typing import Dict, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase :int = logging.get_logger(__name__) __lowerCamelCase :Optional[Any] = { 'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json', # See all DETR models at https://huggingface.co/models?filter=detr } class A__ ( __lowercase): """simple docstring""" snake_case__ : str ='''detr''' snake_case__ : List[str] =['''past_key_values'''] snake_case__ : str ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', } def __init__( self: Optional[int] , __a: Tuple=True , __a: List[Any]=None , __a: List[str]=3 , __a: Any=100 , __a: Union[str, Any]=6 , __a: Union[str, Any]=2_048 , __a: str=8 , __a: Dict=6 , __a: Tuple=2_048 , __a: Optional[int]=8 , __a: int=0.0 , __a: Union[str, Any]=0.0 , __a: Optional[Any]=True , __a: Union[str, Any]="relu" , __a: str=256 , __a: int=0.1 , __a: List[str]=0.0 , __a: Union[str, Any]=0.0 , __a: int=0.02 , __a: Union[str, Any]=1.0 , __a: str=False , __a: int="sine" , __a: List[str]="resnet50" , __a: List[Any]=True , __a: Tuple=False , __a: Dict=1 , __a: Optional[Any]=5 , __a: Optional[int]=2 , __a: List[str]=1 , __a: int=1 , __a: str=5 , __a: List[Any]=2 , __a: Union[str, Any]=0.1 , **__a: Union[str, Any] , )-> int: if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" ) lowerCamelCase : int = CONFIG_MAPPING["resnet"](out_features=["""stage4"""] ) elif isinstance(__A , __A ): lowerCamelCase : Union[str, Any] = backbone_config.get("""model_type""" ) lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] lowerCamelCase : Optional[int] = config_class.from_dict(__A ) # set timm attributes to None lowerCamelCase : List[Any] = None, None, None lowerCamelCase : Tuple = use_timm_backbone lowerCamelCase : Dict = backbone_config lowerCamelCase : List[Any] = num_channels lowerCamelCase : Union[str, Any] = num_queries lowerCamelCase : Optional[Any] = d_model lowerCamelCase : Any = encoder_ffn_dim lowerCamelCase : Optional[Any] = encoder_layers lowerCamelCase : int = encoder_attention_heads lowerCamelCase : Union[str, Any] = decoder_ffn_dim lowerCamelCase : Dict = decoder_layers lowerCamelCase : Dict = decoder_attention_heads lowerCamelCase : Optional[Any] = dropout lowerCamelCase : Tuple = attention_dropout lowerCamelCase : Union[str, Any] = activation_dropout lowerCamelCase : List[Any] = activation_function lowerCamelCase : str = init_std lowerCamelCase : Dict = init_xavier_std lowerCamelCase : Tuple = encoder_layerdrop lowerCamelCase : List[str] = decoder_layerdrop lowerCamelCase : Optional[int] = encoder_layers lowerCamelCase : List[str] = auxiliary_loss lowerCamelCase : Optional[int] = position_embedding_type lowerCamelCase : Union[str, Any] = backbone lowerCamelCase : str = use_pretrained_backbone lowerCamelCase : Union[str, Any] = dilation # Hungarian matcher lowerCamelCase : Any = class_cost lowerCamelCase : int = bbox_cost lowerCamelCase : Tuple = giou_cost # Loss coefficients lowerCamelCase : Union[str, Any] = mask_loss_coefficient lowerCamelCase 
: Tuple = dice_loss_coefficient lowerCamelCase : str = bbox_loss_coefficient lowerCamelCase : Dict = giou_loss_coefficient lowerCamelCase : Any = eos_coefficient super().__init__(is_encoder_decoder=__A , **__A ) @property def a__ ( self: Tuple )-> int: return self.encoder_attention_heads @property def a__ ( self: str )-> Union[str, Any]: return self.d_model @classmethod def a__ ( cls: str , __a: PretrainedConfig , **__a: Optional[Any] )-> List[Any]: return cls(backbone_config=__A , **__A ) def a__ ( self: Optional[Any] )-> str: lowerCamelCase : Union[str, Any] = copy.deepcopy(self.__dict__ ) if output["backbone_config"] is not None: lowerCamelCase : List[Any] = self.backbone_config.to_dict() lowerCamelCase : List[str] = self.__class__.model_type return output class A__ ( __lowercase): """simple docstring""" snake_case__ : str =version.parse('''1.11''') @property def a__ ( self: Union[str, Any] )-> str: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def a__ ( self: Union[str, Any] )-> Union[str, Any]: return 1e-5 @property def a__ ( self: Optional[int] )-> List[str]: return 12
711
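The attribute_map indirection above is what lets framework-generic code read hidden_size from a config whose real field is d_model; a quick check:

from transformers import DetrConfig

config = DetrConfig()
# attribute_map aliases the generic names onto DETR-specific fields
print(config.hidden_size == config.d_model)  # True
print(config.num_attention_heads)            # 8, i.e. encoder_attention_heads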
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A__ ( nn.Module): """simple docstring""" def __init__( self: Dict )-> Dict: super().__init__() lowerCamelCase : Tuple = nn.Linear(3 , 4 ) lowerCamelCase : Optional[Any] = nn.BatchNormad(4 ) lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 ) def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple: return (args[0] + 1,) + args[1:], kwargs class A__ ( __lowercase): """simple docstring""" def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]: return output + 1 class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Dict = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Union[str, Any] = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : str = ModelForTest() lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Union[str, Any] = test_model(x + 1 ) lowerCamelCase : Optional[int] = test_model(x + 2 ) lowerCamelCase : List[Any] = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[int] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) assert torch.allclose(__a , __a , atol=1e-5 ) def a__ ( self: Any )-> Optional[int]: lowerCamelCase : str = ModelForTest() lowerCamelCase : List[str] = torch.randn(2 , 3 ) lowerCamelCase : int = test_model(__a ) lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple 
= test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : str = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1e-5 ) def a__ ( self: int )-> Dict: lowerCamelCase : List[Any] = ModelForTest() lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : List[str] = test_model(__a ) lowerCamelCase : Any = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a__ ( self: List[str] )-> Union[str, Any]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowerCamelCase : str = torch.randn(2 , 3 ) lowerCamelCase : Dict = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 ) lowerCamelCase : str = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Optional[Any] = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload lowerCamelCase : Any = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : int = torch.randn(2 , 3 ) lowerCamelCase : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Any )-> List[str]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Optional[Any] )-> List[Any]: lowerCamelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Tuple = torch.randn(2 , 3 ) lowerCamelCase : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
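For orientation, here is a minimal standalone sketch of the hook API exercised by the tests above. ScaleOutputHook and its doubling behavior are illustrative assumptions, not part of accelerate; only ModelHook, add_hook_to_module and remove_hook_from_module come from the library.

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class ScaleOutputHook(ModelHook):
    # Illustrative hook: doubles whatever the wrapped module returns.
    def post_forward(self, module, output):
        return output * 2


layer = nn.Linear(3, 4)
add_hook_to_module(layer, ScaleOutputHook())  # patches layer.forward in place
x = torch.randn(2, 3)
scaled = layer(x)
remove_hook_from_module(layer)                # restores the original forward
assert torch.allclose(scaled, layer(x) * 2, atol=1e-6)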
"""simple docstring""" from math import factorial def snake_case ( UpperCamelCase__ : Tuple = 20 ) -> int: lowerCamelCase : int = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1, # 2, 3,... lowerCamelCase : Dict = n // 2 return int(factorial(__lowerCAmelCase ) / (factorial(__lowerCAmelCase ) * factorial(n - k )) ) if __name__ == "__main__": import sys if len(sys.argv) == 1: print(solution(20)) else: try: __lowerCamelCase :List[str] = int(sys.argv[1]) print(solution(n)) except ValueError: print('Invalid entry - please enter a number.')
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCamelCase :Optional[Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Union[str, Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import torch def snake_case ( ) -> Dict: if torch.cuda.is_available(): lowerCamelCase : Optional[Any] = torch.cuda.device_count() else: lowerCamelCase : Union[str, Any] = 0 print(F'Successfully ran on {num_gpus} GPUs' ) if __name__ == "__main__": main()
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]: lowerCamelCase : Optional[int] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : Tuple = num_channels lowerCamelCase : str = num_stages lowerCamelCase : List[str] = hidden_sizes lowerCamelCase : str = depths lowerCamelCase : Dict = is_training lowerCamelCase : Optional[Any] = use_labels lowerCamelCase : List[str] = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : List[str] = num_labels lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : List[Any] = out_features lowerCamelCase : Optional[Any] = out_indices lowerCamelCase : int = scope def a__ ( self: str )-> Optional[Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def a__ ( self: Dict )-> Union[str, Any]: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]: lowerCamelCase : Optional[int] = ConvNextModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]: lowerCamelCase : str = ConvNextForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]: lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() 
lowerCamelCase : int = model(__a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase : Tuple = None lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs lowerCamelCase : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : int =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) snake_case__ : str =( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) snake_case__ : Union[str, Any] =True snake_case__ : Optional[int] =False snake_case__ : Tuple =False snake_case__ : Union[str, Any] =False snake_case__ : Tuple =False def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = ConvNextModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def a__ ( self: Optional[int] )-> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: Optional[int] )-> Optional[Any]: return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def a__ ( self: int )-> Dict: pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def a__ ( self: Dict )-> Optional[Any]: pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def a__ ( self: int )-> List[Any]: pass def a__ ( self: Union[str, Any] )-> int: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__a ) lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: Optional[int] )-> str: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: str )-> int: lowerCamelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: int )-> Optional[int]: def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ): lowerCamelCase : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__a ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Tuple = True check_hidden_states_output(__a , __a , __a ) def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def a__ ( self: Optional[Any] )-> Tuple: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : str = ConvNextModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Optional[int]: lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Dict )-> Union[str, Any]: return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a ) lowerCamelCase : Dict = self.default_image_processor lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @require_torch class A__ ( unittest.TestCase , __lowercase): """simple docstring""" snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else () snake_case__ : Optional[Any] =ConvNextConfig snake_case__ : Optional[Any] =False def a__ ( self: List[str] )-> int: lowerCamelCase : Dict = ConvNextModelTester(self )
"""simple docstring""" from __future__ import annotations import math def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ) -> Any: if depth < 0: raise ValueError("""Depth cannot be less than 0""" ) if len(lowerCAmelCase__ ) == 0: raise ValueError("""Scores cannot be empty""" ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , ) def snake_case ( ) -> Dict: lowerCamelCase : str = [90, 23, 6, 33, 21, 65, 123, 34423] lowerCamelCase : Optional[int] = math.log(len(lowerCAmelCase__ ) , 2 ) print("""Optimal value : """ , end="""""" ) print(minimax(0 , 0 , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Optional[int] = logging.get_logger(__name__) __lowerCamelCase :List[str] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class A__ ( __lowercase): """simple docstring""" snake_case__ : Optional[Any] ='''realm''' def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) # Common config lowerCamelCase : Optional[Any] = vocab_size lowerCamelCase : str = max_position_embeddings lowerCamelCase : Dict = hidden_size lowerCamelCase : Dict = retriever_proj_size lowerCamelCase : Optional[Any] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Tuple = num_candidates lowerCamelCase : int = intermediate_size lowerCamelCase : Dict = hidden_act lowerCamelCase : List[str] = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Optional[int] = initializer_range lowerCamelCase : Dict = type_vocab_size lowerCamelCase : Optional[Any] = layer_norm_eps # Reader config lowerCamelCase : List[str] = span_hidden_size lowerCamelCase : Dict = max_span_width lowerCamelCase : Optional[Any] = reader_layer_norm_eps lowerCamelCase : Optional[int] = reader_beam_size lowerCamelCase : List[Any] = reader_seq_len # Retrieval config lowerCamelCase : int = num_block_records lowerCamelCase : Dict = searcher_beam_size
"""simple docstring""" from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING __lowerCamelCase :Optional[Any] = logging.get_logger(__name__) @add_end_docstrings(__lowerCAmelCase) class A__ ( __lowerCAmelCase): """simple docstring""" def __init__( self: str , *__a: int , **__a: List[str] )-> Tuple: super().__init__(*_UpperCamelCase , **_UpperCamelCase ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def a__ ( self: Dict , __a: List[str]=None )-> Optional[int]: lowerCamelCase : Optional[int] = {} if top_k is not None: lowerCamelCase : str = top_k return {}, {}, postprocess_params def __call__( self: Optional[int] , __a: Union[str, List[str], "Image.Image", List["Image.Image"]] , **__a: Optional[int] )-> Dict: return super().__call__(_UpperCamelCase , **_UpperCamelCase ) def a__ ( self: Optional[int] , __a: Optional[int] )-> str: lowerCamelCase : Tuple = load_image(_UpperCamelCase ) lowerCamelCase : Any = self.image_processor(images=_UpperCamelCase , return_tensors=self.framework ) return model_inputs def a__ ( self: str , __a: Union[str, Any] )-> List[str]: lowerCamelCase : Any = self.model(**_UpperCamelCase ) return model_outputs def a__ ( self: Optional[Any] , __a: List[str] , __a: List[str]=5 )-> str: if top_k > self.model.config.num_labels: lowerCamelCase : Union[str, Any] = self.model.config.num_labels if self.framework == "pt": lowerCamelCase : Optional[Any] = model_outputs.logits.softmax(-1 )[0] lowerCamelCase : Dict = probs.topk(_UpperCamelCase ) elif self.framework == "tf": lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0] lowerCamelCase : List[Any] = tf.math.top_k(_UpperCamelCase , k=_UpperCamelCase ) lowerCamelCase : str = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'Unsupported framework: {self.framework}' ) lowerCamelCase : str = scores.tolist() lowerCamelCase : str = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(_UpperCamelCase , _UpperCamelCase )]
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Tuple = logging.get_logger(__name__) __lowerCamelCase :Any = { 'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json', # See all GLPN models at https://huggingface.co/models?filter=glpn } class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple ='''glpn''' def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict: super().__init__(**__a ) lowerCamelCase : Dict = num_channels lowerCamelCase : Any = num_encoder_blocks lowerCamelCase : Dict = depths lowerCamelCase : List[str] = sr_ratios lowerCamelCase : Dict = hidden_sizes lowerCamelCase : Tuple = patch_sizes lowerCamelCase : Optional[int] = strides lowerCamelCase : Optional[Any] = mlp_ratios lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : List[str] = hidden_act lowerCamelCase : Any = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : List[Any] = initializer_range lowerCamelCase : Dict = drop_path_rate lowerCamelCase : Any = layer_norm_eps lowerCamelCase : Optional[Any] = decoder_hidden_size lowerCamelCase : Tuple = max_depth lowerCamelCase : Optional[Any] = head_in_index
"""simple docstring""" import logging from dataclasses import dataclass, field from pathlib import Path from typing import Optional, Union from .generation.configuration_utils import GenerationConfig from .training_args import TrainingArguments from .utils import add_start_docstrings __lowerCamelCase :Union[str, Any] = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__) class A__ ( a__): """simple docstring""" snake_case__ : Dict =field(default=a__ , metadata={'''help''': '''Whether to use SortishSampler or not.'''}) snake_case__ : Union[str, Any] =field( default=a__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''}) snake_case__ : Any =field( default=a__ , metadata={ '''help''': ( '''The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `max_length` value of the model configuration.''' ) } , ) snake_case__ : Tuple =field( default=a__ , metadata={ '''help''': ( '''The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default ''' '''to the `num_beams` value of the model configuration.''' ) } , ) snake_case__ : List[Any] =field( default=a__ , metadata={ '''help''': '''Model id, file path or url pointing to a GenerationConfig json file, to use during prediction.''' } , ) def a__ ( self: Union[str, Any] )-> Optional[Any]: lowerCamelCase : str = super().to_dict() for k, v in d.items(): if isinstance(_A , _A ): lowerCamelCase : Tuple = v.to_dict() return d
"""simple docstring""" from __future__ import annotations import math def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float: lowerCamelCase : Dict = u for i in range(1 , UpperCamelCase__ ): lowerCamelCase : List[str] = temp * (u - i) return temp def snake_case ( ) -> None: lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) ) lowerCamelCase : list[list[float]] = [] for _ in range(UpperCamelCase__ ): y.append([] ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): y[i].append(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = 0 print("""enter the values of parameters in a list: """ ) lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(UpperCamelCase__ ): lowerCamelCase : int = float(input() ) lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) ) lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , UpperCamelCase__ ): for j in range(n - i ): lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1] lowerCamelCase : Any = y[0][0] for i in range(1 , UpperCamelCase__ ): summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ ) print(F'the value at {value} is {summ}' ) if __name__ == "__main__": main()
"""simple docstring""" from __future__ import annotations import requests __lowerCamelCase :int = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple = 1 , UpperCamelCase__ : Tuple = "new" , UpperCamelCase__ : Any = None ) -> Any: lowerCamelCase : int = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(_lowerCamelCase ) - valid_terms ) ): lowerCamelCase : Optional[Any] = F'Invalid search term: {invalid_search_terms}' raise ValueError(_lowerCamelCase ) lowerCamelCase : List[str] = requests.get( F'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"""User-agent""": """A random string"""} , ) if response.status_code == 429: raise requests.HTTPError lowerCamelCase : Any = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(_lowerCamelCase )} lowerCamelCase : Tuple = {} for id_ in range(_lowerCamelCase ): lowerCamelCase : Union[str, Any] = { item: data["data"]["children"][id_]["data"][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase :str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys __lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from ..utils import DummyObject, requires_backends class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Union[str, Any] =['''flax'''] def __init__( self: Any , *__a: int , **__a: List[Any] )-> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Optional[int] , *__a: List[Any] , **__a: str )-> Any: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: int , *__a: Optional[Any] , **__a: str )-> List[str]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : List[str] =['''flax'''] def __init__( self: Dict , *__a: str , **__a: Tuple )-> Any: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Any , *__a: str , **__a: Union[str, Any] )-> Optional[int]: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Union[str, Any] , *__a: Tuple , **__a: List[Any] )-> Optional[int]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Optional[Any] =['''flax'''] def __init__( self: int , *__a: Dict , **__a: Optional[Any] )-> int: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Optional[int] , *__a: Optional[int] , **__a: Any )-> int: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Dict , *__a: Optional[int] , **__a: Union[str, Any] )-> int: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : int =['''flax'''] def __init__( self: List[str] , *__a: Dict , **__a: Tuple )-> List[str]: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: int , *__a: Optional[int] , **__a: Union[str, Any] )-> Dict: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Union[str, Any] , *__a: Tuple , **__a: Optional[Any] )-> List[Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Tuple =['''flax'''] def __init__( self: Optional[Any] , *__a: int , **__a: List[Any] )-> Optional[int]: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Dict , *__a: List[str] , **__a: Union[str, Any] )-> int: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Any , *__a: Union[str, Any] , **__a: Dict )-> Optional[Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Tuple =['''flax'''] def __init__( self: Tuple , *__a: Optional[Any] , **__a: List[Any] )-> int: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: str , *__a: List[Any] , **__a: Dict )-> int: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Optional[int] , *__a: Union[str, Any] , **__a: Tuple )-> Optional[Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Union[str, Any] =['''flax'''] def __init__( self: List[Any] , *__a: Optional[Any] , **__a: str )-> int: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: str , *__a: List[Any] , **__a: Union[str, Any] )-> Any: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Optional[int] , *__a: Optional[Any] , **__a: Any )-> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : int =['''flax'''] def __init__( self: Union[str, Any] , *__a: List[str] , **__a: Any )-> Dict: 
requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Dict , *__a: Tuple , **__a: Tuple )-> List[str]: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Any , *__a: Optional[Any] , **__a: Optional[Any] )-> Any: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Any =['''flax'''] def __init__( self: List[str] , *__a: Optional[int] , **__a: Tuple )-> Any: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Union[str, Any] , *__a: Optional[int] , **__a: Optional[Any] )-> Union[str, Any]: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Optional[Any] , *__a: List[str] , **__a: Tuple )-> List[Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Optional[Any] =['''flax'''] def __init__( self: Dict , *__a: Dict , **__a: int )-> str: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Any , *__a: Optional[int] , **__a: Union[str, Any] )-> str: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Optional[int] , *__a: List[Any] , **__a: Any )-> Union[str, Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : int =['''flax'''] def __init__( self: Optional[Any] , *__a: Dict , **__a: List[Any] )-> List[Any]: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: List[str] , *__a: Union[str, Any] , **__a: Any )-> int: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: List[Any] , *__a: str , **__a: Any )-> List[Any]: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Tuple =['''flax'''] def __init__( self: Optional[int] , *__a: Union[str, Any] , **__a: Optional[Any] )-> str: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: Any , *__a: Optional[Any] , **__a: Optional[int] )-> List[Any]: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: Any , *__a: List[str] , **__a: Tuple )-> Dict: requires_backends(cls , ["""flax"""] ) class A__ ( metaclass=_UpperCAmelCase): """simple docstring""" snake_case__ : Any =['''flax'''] def __init__( self: Tuple , *__a: str , **__a: Any )-> Dict: requires_backends(self , ["""flax"""] ) @classmethod def a__ ( cls: List[Any] , *__a: int , **__a: List[str] )-> Any: requires_backends(cls , ["""flax"""] ) @classmethod def a__ ( cls: List[str] , *__a: Dict , **__a: Tuple )-> Union[str, Any]: requires_backends(cls , ["""flax"""] )
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase :Dict = logging.get_logger() def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict: print(F'Converting {name}...' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ ) else: lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 192: lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 256: lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 384: lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ ) from_model.eval() lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval() lowerCamelCase : Tuple = OrderedDict() lowerCamelCase : Optional[Any] = from_model.state_dict() lowerCamelCase : str = list(from_model.state_dict().keys() ) lowerCamelCase : List[Any] = list(our_model.state_dict().keys() ) print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : str = weights[og_keys[i]] our_model.load_state_dict(UpperCamelCase__ ) lowerCamelCase : int = torch.randn((2, 3, 224, 224) ) lowerCamelCase : Any = from_model(UpperCamelCase__ ) lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one." 
lowerCamelCase : Dict = name print(UpperCamelCase__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowerCamelCase : Optional[int] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'Pushed {checkpoint_name}' ) def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]: lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : List[Any] = 1000 lowerCamelCase : Dict = (1, num_labels) lowerCamelCase : List[Any] = """huggingface/label-files""" lowerCamelCase : Optional[int] = num_labels lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase : List[Any] = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) lowerCamelCase : Optional[int] = { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } lowerCamelCase : List[Any] = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) __lowerCamelCase :List[Any] = parser.parse_args() __lowerCamelCase :Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring""" def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ) -> list: lowerCamelCase : int = len(_A ) lowerCamelCase : Dict = [[0] * n for i in range(_A )] for i in range(_A ): lowerCamelCase : List[Any] = y_points[i] for i in range(2 , _A ): for j in range(_A , _A ): lowerCamelCase : Dict = ( (xa - x_points[j - i + 1]) * q[j][i - 1] - (xa - x_points[j]) * q[j - 1][i - 1] ) / (x_points[j] - x_points[j - i + 1]) return [q[n - 1][n - 1], q] if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple =(KDPMaDiscreteScheduler,) snake_case__ : Tuple =10 def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]: lowerCamelCase : int = { """num_train_timesteps""": 1_100, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__a ) return config def a__ ( self: Union[str, Any] )-> Any: for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__a ) def a__ ( self: str )-> int: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def a__ ( self: int )-> Union[str, Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a ) def a__ ( self: List[Any] )-> List[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def a__ ( self: Union[str, Any] )-> int: lowerCamelCase : List[str] = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase : List[str] = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : Dict = self.dummy_model() lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : List[Any] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : Optional[Any] = output.prev_sample lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2 assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2 assert abs(result_mean.item() - 0.00_02 ) < 1e-3 def a__ ( self: Any )-> Any: if torch_device == "mps": return lowerCamelCase : Dict = self.scheduler_classes[0] lowerCamelCase : Dict = self.get_scheduler_config() lowerCamelCase : int = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : List[Any] = self.dummy_model() lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : Optional[int] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[Any] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : str = output.prev_sample lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 def a__ ( self: Optional[Any] )-> List[Any]: if torch_device == "mps": return lowerCamelCase : Any = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config() lowerCamelCase : Optional[Any] = 
scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) lowerCamelCase : Union[str, Any] = self.dummy_model() lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : int = scheduler.step(__a , __a , __a ) lowerCamelCase : int = output.prev_sample lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) ) lowerCamelCase : int = torch.mean(torch.abs(__a ) ) if str(__a ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3
"""simple docstring""" def snake_case ( UpperCamelCase__ : Any = 1000000 ) -> int: lowerCamelCase : int = [i - 1 for i in range(limit + 1 )] for i in range(2 , limit + 1 ): if phi[i] == i - 1: for j in range(2 * i , limit + 1 , UpperCamelCase__ ): phi[j] -= phi[j] // i return sum(phi[2 : limit + 1] ) if __name__ == "__main__": print(solution())
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =StableDiffusionXLImgaImgPipeline snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS def a__ ( self: List[str] )-> int: torch.manual_seed(0 ) lowerCamelCase : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) lowerCamelCase : Any = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) lowerCamelCase : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) lowerCamelCase : Dict = CLIPTextModel(__a ) lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : Dict = CLIPTextModelWithProjection(__a ) lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]: lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) lowerCamelCase : Any = image / 2 + 0.5 if str(__a ).startswith("""mps""" ): lowerCamelCase : Dict = torch.manual_seed(__a ) else: lowerCamelCase : Tuple = torch.Generator(device=__a 
).manual_seed(__a ) lowerCamelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase : Union[str, Any] = self.get_dummy_components() lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : int = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a ) lowerCamelCase : Optional[int] = sd_pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a__ ( self: Optional[int] )-> Union[str, Any]: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def a__ ( self: Optional[Any] )-> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def a__ ( self: List[str] )-> Optional[Any]: pass def a__ ( self: List[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = self.get_dummy_components() lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : str = sd_pipe.to(__a ) lowerCamelCase : Any = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) # forward without prompt embeds lowerCamelCase : Dict = self.get_dummy_inputs(__a ) lowerCamelCase : Any = 3 * ["""this is a negative prompt"""] lowerCamelCase : Optional[int] = negative_prompt lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]] lowerCamelCase : List[Any] = sd_pipe(**__a ) lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCamelCase : Tuple = self.get_dummy_inputs(__a ) lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""] lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )] ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a ) lowerCamelCase : int = sd_pipe( **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , ) lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Dict )-> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]: lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) ) lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a ) lowerCamelCase : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self: Optional[int] )-> List[str]: lowerCamelCase : Tuple = 
DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[int] = self.get_inputs(__a ) lowerCamelCase : Optional[Any] = pipe(**__a ).images lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
42
0
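The record above exercises diffusers' SDXL image-to-image pipeline end to end with tiny dummy components. For orientation, here is a minimal end-user sketch of the same pipeline, assuming the "StableDiffusionXLImgaImgPipeline" spelling in the record is a mangled StableDiffusionXLImg2ImgPipeline (the same digit mangling visible in "UNetaDConditionModel"); the checkpoint id and input file below are placeholders, not taken from the tests.

# Hedged sketch: mirrors the tested prompt/strength/guidance settings outside the test harness.
import torch
from PIL import Image
from diffusers import StableDiffusionXLImg2ImgPipeline  # assumed real name behind "ImgaImg"

pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0",  # placeholder checkpoint choice
    torch_dtype=torch.float16,
).to("cuda")

init_image = Image.open("input.png").convert("RGB").resize((768, 768))  # placeholder file
image = pipe(
    prompt="A painting of a squirrel eating a burger",
    image=init_image,
    strength=0.75,       # how far to move away from init_image, as in the dummy inputs
    guidance_scale=5.0,
    num_inference_steps=30,
).images[0]
image.save("squirrel.png")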
"""simple docstring""" from __future__ import annotations def snake_case ( UpperCamelCase__ : list , UpperCamelCase__ : int | None = None , UpperCamelCase__ : int | None = None ) -> Union[str, Any]: if start is None: lowerCamelCase : int = 0 if end is None: lowerCamelCase : int = len(__UpperCamelCase ) - 1 if start >= end: return lowerCamelCase : str = (start + end) // 2 slowsort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) slowsort(__UpperCamelCase , mid + 1 , __UpperCamelCase ) if sequence[end] < sequence[mid]: lowerCamelCase : str = sequence[mid], sequence[end] slowsort(__UpperCamelCase , __UpperCamelCase , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
721
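Slowsort is a deliberately pessimal "multiply and surrender" sort: it recursively sorts both halves, swaps the two candidate maxima so the largest element lands at index end, then re-sorts everything but that element. A quick usage check against the file above (no assumptions beyond the function being importable):

data = [5, 1, 4, 2, 8, 0]
slowsort(data)                     # sorts in place and returns None
assert data == [0, 1, 2, 4, 5, 8]

data = [9, 7, 3, 5, 2]
slowsort(data, 1, 3)               # only indices 1..3 are ordered
assert data == [9, 3, 5, 7, 2]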
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]: return None class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple: return None class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =[ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def a__ ( self: Optional[Any] )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """tf""" , 12 , **__a ) @require_torch @slow def a__ ( self: str )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """pt""" , 12 , **__a ) @require_torch @slow def a__ ( self: Union[str, Any] )-> Dict: from transformers import BertModel lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__a ) ) vocab_file.flush() lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , """pt""" , 12 , __a ) @require_tf @slow def a__ ( self: Optional[Any] )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a ) lowerCamelCase : Tuple = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def a__ ( self: Any )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a ) lowerCamelCase : Dict = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any: try: # Compute path with TemporaryDirectory() as tempdir: lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def a__ ( self: Tuple )-> Dict: from transformers import BertModel lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a 
, """pt""" ) @require_tf @require_tokenizers @slow def a__ ( self: Optional[Any] )-> List[Any]: from transformers import TFBertModel lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a , """tf""" ) def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]: lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a ) lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} ) def a__ ( self: List[Any] )-> int: lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] , """input_ids""" ) def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
42
0
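The record above tests transformers.convert_graph_to_onnx, a legacy exporter (newer code goes through transformers.onnx or optimum). A hedged sketch of the call pattern the tests drive, with the output path as a placeholder and the keyword names assumed from the legacy API:

from pathlib import Path
from transformers.convert_graph_to_onnx import convert, quantize

# export bert-base-cased through a feature-extraction pipeline at ONNX opset 12,
# the same opset the tests pass to _test_export
convert(framework="pt", model="bert-base-cased", output=Path("onnx/bert-base-cased.onnx"), opset=12)

# quantize returns the path of the (hopefully smaller) quantized model,
# which the tests compare against the original file size
quantized_path = quantize(Path("onnx/bert-base-cased.onnx"))
print(quantized_path)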
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class A__ : """simple docstring""" def __init__( self: Dict , __a: Union[str, Any] , __a: List[Any]=13 , __a: Optional[Any]=30 , __a: List[Any]=2 , __a: Dict=3 , __a: str=True , __a: Any=True , __a: List[str]=32 , __a: Optional[Any]=2 , __a: int=4 , __a: Optional[Any]=37 , __a: Union[str, Any]="gelu" , __a: str=0.1 , __a: str=0.1 , __a: Tuple=10 , __a: Tuple=0.02 , __a: Optional[int]=3 , __a: Optional[Any]=0.6 , __a: Dict=None , )-> List[str]: lowerCamelCase : Any = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Dict = image_size lowerCamelCase : List[str] = patch_size lowerCamelCase : int = num_channels lowerCamelCase : str = is_training lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Union[str, Any] = hidden_size lowerCamelCase : int = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Any = hidden_act lowerCamelCase : Tuple = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Tuple = initializer_range lowerCamelCase : Dict = mask_ratio lowerCamelCase : int = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowerCamelCase : Union[str, Any] = (image_size // patch_size) ** 2 lowerCamelCase : int = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def a__ ( self: int )-> str: lowerCamelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def a__ ( self: List[str] )-> List[str]: return ViTMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , ) def a__ ( self: List[str] , __a: int , __a: str , __a: Union[str, Any] )-> Tuple: lowerCamelCase : List[str] = TFViTMAEModel(config=A_ ) 
lowerCamelCase : Any = model(A_ , training=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self: Optional[Any] , __a: Dict , __a: Union[str, Any] , __a: List[str] )-> Optional[int]: lowerCamelCase : List[str] = TFViTMAEForPreTraining(A_ ) lowerCamelCase : int = model(A_ , training=A_ ) # expected sequence length = num_patches lowerCamelCase : str = (self.image_size // self.patch_size) ** 2 lowerCamelCase : Tuple = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : List[str] = TFViTMAEForPreTraining(A_ ) lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Union[str, Any] = model(A_ , training=A_ ) lowerCamelCase : Union[str, Any] = self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) ) def a__ ( self: Dict )-> int: lowerCamelCase : List[str] = self.prepare_config_and_inputs() ((lowerCamelCase) , (lowerCamelCase) , (lowerCamelCase)) : List[Any] = config_and_inputs lowerCamelCase : Union[str, Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class A__ ( _lowercase , _lowercase , unittest.TestCase): """simple docstring""" snake_case__ : int =(TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () snake_case__ : str ={'''feature-extraction''': TFViTMAEModel} if is_tf_available() else {} snake_case__ : Any =False snake_case__ : Any =False snake_case__ : str =False snake_case__ : str =False def a__ ( self: List[Any] )-> Optional[int]: lowerCamelCase : List[str] = TFViTMAEModelTester(self ) lowerCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 ) def a__ ( self: int )-> Optional[int]: self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMAE does not use inputs_embeds""" ) def a__ ( self: Any )-> Any: pass def a__ ( self: List[str] )-> List[str]: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : str = model_class(A_ ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) lowerCamelCase : int = model.get_output_embeddings() self.assertTrue(x is None or isinstance(A_ , tf.keras.layers.Layer ) ) def a__ ( self: Dict )-> Optional[int]: lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : str = model_class(A_ ) lowerCamelCase : Optional[int] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[int] = [*signature.parameters.keys()] lowerCamelCase : Tuple = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , A_ ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def a__ ( self: Optional[Any] )-> int: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*A_ ) def a__ ( self: List[str] )-> Optional[Any]: # make the mask reproducible np.random.seed(2 ) lowerCamelCase , lowerCamelCase : Optional[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : str = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : str = model_class(A_ ) lowerCamelCase : Dict = self._prepare_for_class(A_ , A_ ) lowerCamelCase : Optional[int] = model(A_ , noise=A_ ) lowerCamelCase : int = copy.deepcopy(self._prepare_for_class(A_ , A_ ) ) lowerCamelCase : str = model(**A_ , noise=A_ ) lowerCamelCase : Optional[int] = outputs_dict[0].numpy() lowerCamelCase : Any = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 ) def a__ ( self: List[Any] )-> Dict: # make the mask reproducible np.random.seed(2 ) lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__a: Union[str, Any] ): lowerCamelCase : Union[str, Any] = {} for k, v in inputs_dict.items(): if tf.is_tensor(A_ ): lowerCamelCase : Tuple = v.numpy() else: lowerCamelCase : int = np.array(A_ ) return inputs_np_dict for model_class in self.all_model_classes: lowerCamelCase : List[str] = model_class(A_ ) lowerCamelCase : Dict = self._prepare_for_class(A_ , A_ ) lowerCamelCase : Dict = prepare_numpy_arrays(A_ ) lowerCamelCase : Any = model(A_ , noise=A_ ) lowerCamelCase : int = model(**A_ , noise=A_ ) self.assert_outputs_same(A_ , A_ ) def a__ ( self: Tuple , __a: Dict , __a: Union[str, Any] , __a: Optional[Any] )-> Dict: # make masks reproducible np.random.seed(2 ) lowerCamelCase : Optional[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowerCamelCase : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase : Dict = tf.constant(A_ ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowerCamelCase : List[str] = tf_noise super().check_pt_tf_models(A_ , A_ , A_ ) def a__ ( self: Optional[int] )-> Any: # make mask reproducible np.random.seed(2 ) lowerCamelCase , lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(A_ ) if module_member_name.endswith("""MainLayer""" ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )] for module_member in (getattr(A_ , A_ ),) if isinstance(A_ , A_ ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(A_ , """_keras_serializable""" , A_ ) } lowerCamelCase : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowerCamelCase : Optional[Any] = tf.convert_to_tensor(A_ ) inputs_dict.update({"""noise""": noise} ) for main_layer_class in tf_main_layer_classes: lowerCamelCase : List[str] = main_layer_class(A_ ) lowerCamelCase : Union[str, Any] = { name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowerCamelCase : List[str] = tf.keras.Model(A_ , outputs=main_layer(A_ ) ) lowerCamelCase : Optional[int] = model(A_ ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase : Any = os.path.join(A_ , """keras_model.h5""" ) model.save(A_ ) lowerCamelCase : str = tf.keras.models.load_model( A_ , custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(A_ , tf.keras.Model ) lowerCamelCase : Any = model(A_ ) self.assert_outputs_same(A_ , A_ ) @slow def a__ ( self: Any )-> str: # make mask reproducible np.random.seed(2 ) lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : Optional[int] = model_class(A_ ) lowerCamelCase : List[str] = self._prepare_for_class(A_ , A_ ) lowerCamelCase : str = model(A_ , noise=A_ ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase : Any = outputs.last_hidden_state.numpy() lowerCamelCase : List[Any] = 0 else: lowerCamelCase : Any = outputs.logits.numpy() lowerCamelCase : List[Any] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(A_ , saved_model=A_ ) lowerCamelCase : Optional[int] = model_class.from_pretrained(A_ ) lowerCamelCase : Tuple = model(A_ , noise=A_ ) if model_class.__name__ == "TFViTMAEModel": lowerCamelCase : List[Any] = after_outputs["""last_hidden_state"""].numpy() lowerCamelCase : Dict = 0 else: lowerCamelCase : Tuple = after_outputs["""logits"""].numpy() lowerCamelCase : int = 0 lowerCamelCase : Any = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(A_ , 1e-5 ) def a__ ( self: str )-> Any: # make mask reproducible np.random.seed(2 ) lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Tuple = int((config.image_size // config.patch_size) ** 2 ) lowerCamelCase : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowerCamelCase : List[Any] = model_class(A_ ) lowerCamelCase : Any = self._prepare_for_class(A_ , A_ ) lowerCamelCase : Dict = model(A_ , noise=A_ ) lowerCamelCase : Any = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(A_ ) lowerCamelCase : Optional[int] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowerCamelCase : List[Any] = model_class.from_config(model.config ) lowerCamelCase : Optional[int] = new_model(A_ ) # Build model new_model.set_weights(model.get_weights() ) 
lowerCamelCase : Any = new_model(A_ , noise=A_ ) self.assert_outputs_same(A_ , A_ ) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.""" ) def a__ ( self: Optional[int] )-> Optional[Any]: pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" ) def a__ ( self: List[Any] )-> Dict: pass @slow def a__ ( self: Union[str, Any] )-> List[str]: lowerCamelCase : Dict = TFViTMAEModel.from_pretrained("""google/vit-base-patch16-224""" ) self.assertIsNotNone(A_ ) def snake_case ( ) -> str: lowerCamelCase : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Dict )-> Union[str, Any]: return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None @slow def a__ ( self: Union[str, Any] )-> Optional[int]: # make random mask reproducible across the PT and TF model np.random.seed(2 ) lowerCamelCase : int = TFViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ) lowerCamelCase : int = self.default_image_processor lowerCamelCase : Optional[Any] = prepare_img() lowerCamelCase : Any = image_processor(images=A_ , return_tensors="""tf""" ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowerCamelCase : Optional[Any] = ViTMAEConfig() lowerCamelCase : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowerCamelCase : Union[str, Any] = np.random.uniform(size=(1, num_patches) ) # forward pass lowerCamelCase : int = model(**A_ , noise=A_ ) # verify the logits lowerCamelCase : Optional[Any] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape , A_ ) lowerCamelCase : Dict = tf.convert_to_tensor( [[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] , A_ , atol=1e-4 )
700
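A recurring trick in the ViTMAE record above: the model masks a random subset of patches on every forward pass, so the tests pin the noise argument to keep runs comparable. A minimal sketch of the same idea at inference time (the image path is a placeholder):

import numpy as np
from PIL import Image
from transformers import TFViTMAEForPreTraining, ViTImageProcessor

model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
processor = ViTImageProcessor.from_pretrained("facebook/vit-mae-base")

inputs = processor(images=Image.open("photo.jpg").convert("RGB"), return_tensors="tf")

# one noise value per patch decides which patches get masked; fixing the seed
# makes the random masking reproducible, exactly as the tests do with np.random.seed(2)
num_patches = (model.config.image_size // model.config.patch_size) ** 2
noise = np.random.RandomState(2).uniform(size=(1, num_patches))

outputs = model(**inputs, noise=noise)
print(outputs.logits.shape)  # (1, num_patches, patch_size**2 * num_channels)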
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Optional[int] )-> Union[str, Any]: lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60] lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12] lowerCamelCase : Union[str, Any] = 100 self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 ) def a__ ( self: str )-> str: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: str )-> List[Any]: self.assertRaisesRegex(__a , """Weight can not be negative.""" ) def a__ ( self: Any )-> Dict: self.assertRaisesRegex(__a , """Profit can not be negative.""" ) def a__ ( self: Optional[Any] )-> List[Any]: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: Optional[Any] )-> Tuple: self.assertRaisesRegex( __a , """The length of profit and weight must be same.""" ) if __name__ == "__main__": unittest.main()
42
0
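The tests above pin down greedy_knapsack.calc_profit only through its error messages and one expected value (210, the case where every item fits in capacity 100). A sketch of a function consistent with those constraints, a fractional greedy by profit/weight ratio; the real library implementation may differ in detail:

def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    # validation messages must match the regexes asserted in the tests verbatim
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # greedily take items in descending profit-per-weight order,
    # taking a fraction of the first item that no longer fits
    gain, used = 0.0, 0
    for p, w in sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True):
        if used + w <= max_weight:
            used += w
            gain += p
        else:
            gain += (max_weight - used) / w * p
            break
    return gain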
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase :Any = { 'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'], 'feature_extraction_mctct': ['MCTCTFeatureExtractor'], 'processing_mctct': ['MCTCTProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :str = [ 'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MCTCTForCTC', 'MCTCTModel', 'MCTCTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig from .feature_extraction_mctct import MCTCTFeatureExtractor from .processing_mctct import MCTCTProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
701
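This init file (and the OwlViT one in the next field) defers all heavy imports through transformers' _LazyModule: _import_structure maps each submodule to its public symbols, and nothing is imported until a symbol is first accessed. A standalone sketch of the same pattern using PEP 562's module-level __getattr__; the package and symbol names here are hypothetical, not transformers internals:

# lazy_pkg/__init__.py
import importlib
from typing import TYPE_CHECKING

_import_structure = {"heavy_module": ["HeavyModel"]}  # hypothetical submodule -> symbols map

if TYPE_CHECKING:
    # type checkers resolve the real imports eagerly...
    from .heavy_module import HeavyModel
else:
    # ...while at runtime the submodule is only imported on first attribute access
    def __getattr__(name):
        for module_name, symbols in _import_structure.items():
            if name in symbols:
                module = importlib.import_module(f".{module_name}", __name__)
                return getattr(module, name)
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")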
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase :List[str] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor'] __lowerCamelCase :List[str] = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple )-> int: lowerCamelCase : Union[str, Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(__a , """num_attention_heads""" ) ) self.parent.assertTrue(hasattr(__a , """num_encoder_blocks""" ) ) class A__ : """simple docstring""" def __init__( self: Any , __a: Union[str, Any] , __a: Any=13 , __a: Dict=64 , __a: Tuple=3 , __a: List[str]=4 , __a: str=[2, 2, 2, 2] , __a: int=[8, 4, 2, 1] , __a: Optional[Any]=[16, 32, 64, 128] , __a: Optional[int]=[1, 4, 8, 16] , __a: List[Any]=[1, 2, 4, 8] , __a: int=True , __a: Optional[int]=True , __a: List[str]="gelu" , __a: List[str]=0.1 , __a: List[str]=0.1 , __a: List[Any]=0.02 , __a: List[Any]=3 , __a: Optional[Any]=None , )-> List[str]: lowerCamelCase : Dict = parent lowerCamelCase : List[str] = batch_size lowerCamelCase : int = image_size lowerCamelCase : Any = num_channels lowerCamelCase : str = num_encoder_blocks lowerCamelCase : int = sr_ratios lowerCamelCase : Union[str, Any] = depths lowerCamelCase : List[str] = hidden_sizes lowerCamelCase : str = downsampling_rates lowerCamelCase : str = num_attention_heads lowerCamelCase : int = is_training lowerCamelCase : Tuple = use_labels lowerCamelCase : Optional[int] = hidden_act lowerCamelCase : int = hidden_dropout_prob lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase : Tuple = initializer_range lowerCamelCase : Union[str, Any] = num_labels lowerCamelCase : Tuple = scope def a__ ( self: Union[str, Any] )-> Union[str, Any]: lowerCamelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase : Union[str, Any] = self.get_config() return config, pixel_values, labels def a__ ( self: int )-> List[Any]: return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def a__ ( self: str , __a: Any , __a: Dict , __a: Union[str, Any] )-> str: lowerCamelCase : Union[str, Any] = SegformerModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) lowerCamelCase : Optional[int] = self.image_size // (self.downsampling_rates[-1] * 2) 
self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def a__ ( self: str , __a: Any , __a: str , __a: Any )-> Union[str, Any]: lowerCamelCase : Dict = self.num_labels lowerCamelCase : Any = SegformerForSemanticSegmentation(__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) lowerCamelCase : List[Any] = model(__a , labels=__a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def a__ ( self: int , __a: Tuple , __a: Union[str, Any] , __a: List[str] )-> Tuple: lowerCamelCase : Dict = 1 lowerCamelCase : Optional[Any] = SegformerForSemanticSegmentation(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__a ) lowerCamelCase : Optional[int] = model(__a , labels=__a ) self.parent.assertGreater(result.loss , 0.0 ) def a__ ( self: Optional[int] )-> Any: lowerCamelCase : Any = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = config_and_inputs lowerCamelCase : Dict = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) snake_case__ : Dict =( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) snake_case__ : Dict =True snake_case__ : Dict =False snake_case__ : Dict =False snake_case__ : Any =False def a__ ( self: Union[str, Any] )-> Union[str, Any]: lowerCamelCase : Any = SegformerModelTester(self ) lowerCamelCase : int = SegformerConfigTester(self , config_class=__a ) def a__ ( self: List[Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: Dict )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: Tuple )-> Any: lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*__a ) def a__ ( self: Dict )-> List[str]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*__a ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def a__ ( self: List[Any] )-> Union[str, Any]: pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def a__ ( self: Union[str, Any] )-> str: pass def a__ ( self: Optional[Any] )-> int: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Union[str, Any] = model_class(__a ) lowerCamelCase : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : int = [*signature.parameters.keys()] lowerCamelCase : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( 
self: int )-> Optional[Any]: lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = True for model_class in self.all_model_classes: lowerCamelCase : str = True lowerCamelCase : Optional[int] = False lowerCamelCase : Union[str, Any] = True lowerCamelCase : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : Optional[int] = outputs.attentions lowerCamelCase : Any = sum(self.model_tester.depths ) self.assertEqual(len(__a ) , __a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase : str = True lowerCamelCase : Dict = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : Union[str, Any] = outputs.attentions self.assertEqual(len(__a ) , __a ) # verify the first attentions (first block, first layer) lowerCamelCase : Tuple = (self.model_tester.image_size // 4) ** 2 lowerCamelCase : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) lowerCamelCase : Optional[Any] = (self.model_tester.image_size // 32) ** 2 lowerCamelCase : Any = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) lowerCamelCase : Union[str, Any] = len(__a ) # Check attention is always last and order is fine lowerCamelCase : Any = True lowerCamelCase : Any = True lowerCamelCase : Optional[int] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : List[str] = model(**self._prepare_for_class(__a , __a ) ) self.assertEqual(out_len + 1 , len(__a ) ) lowerCamelCase : Optional[int] = outputs.attentions self.assertEqual(len(__a ) , __a ) # verify the first attentions (first block, first layer) lowerCamelCase : List[str] = (self.model_tester.image_size // 4) ** 2 lowerCamelCase : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def a__ ( self: List[Any] )-> int: def check_hidden_states_output(__a: str , __a: int , __a: Union[str, Any] ): lowerCamelCase : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : int = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : List[Any] = outputs.hidden_states lowerCamelCase : List[Any] = self.model_tester.num_encoder_blocks self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[str] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] lowerCamelCase : List[str] = True check_hidden_states_output(__a , __a , __a ) def a__ ( self: List[Any] )-> Optional[Any]: if not self.model_tester.is_training: return lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = True for model_class in self.all_model_classes: if model_class in get_values(__a ): continue lowerCamelCase : Tuple = model_class(__a ) model.to(__a ) model.train() lowerCamelCase : Optional[int] = self._prepare_for_class(__a , __a , return_labels=__a ) lowerCamelCase : Dict = model(**__a ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: Tuple )-> int: pass @slow def a__ ( self: Union[str, Any] )-> Union[str, Any]: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[Any] = SegformerModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Any: lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class A__ ( unittest.TestCase): """simple docstring""" @slow def a__ ( self: Optional[Any] )-> Union[str, Any]: # only resize + normalize lowerCamelCase : Dict = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a ) lowerCamelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __a ) lowerCamelCase : Optional[Any] = prepare_img() lowerCamelCase : List[str] = image_processor(images=__a , return_tensors="""pt""" ) lowerCamelCase : Dict = encoded_inputs.pixel_values.to(__a ) with torch.no_grad(): lowerCamelCase : int = model(__a ) lowerCamelCase : str = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Optional[int] = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Union[str, Any] )-> int: # only resize + normalize lowerCamelCase : Tuple = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a ) lowerCamelCase : Any = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__a ) lowerCamelCase : List[Any] = prepare_img() lowerCamelCase : Tuple = image_processor(images=__a , return_tensors="""pt""" ) lowerCamelCase : Optional[Any] = encoded_inputs.pixel_values.to(__a ) with torch.no_grad(): lowerCamelCase : int = model(__a ) lowerCamelCase : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Optional[int] = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , 
__a , atol=1e-1 ) ) @slow def a__ ( self: Optional[int] )-> Any: # only resize + normalize lowerCamelCase : List[Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a ) lowerCamelCase : Dict = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __a ) lowerCamelCase : Dict = prepare_img() lowerCamelCase : Any = image_processor(images=__a , return_tensors="""pt""" ) lowerCamelCase : Tuple = encoded_inputs.pixel_values.to(__a ) with torch.no_grad(): lowerCamelCase : int = model(__a ) lowerCamelCase : List[Any] = outputs.logits.detach().cpu() lowerCamelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(500, 300)] ) lowerCamelCase : str = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __a ) lowerCamelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=__a ) lowerCamelCase : Optional[int] = torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , __a )
702
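For reference, the SegFormer integration tests in the record above boil down to this inference recipe: real transformers classes and a real hub checkpoint, with the input image as a placeholder. Note the 1/4-resolution logits; post_process_semantic_segmentation is what upsamples them back to a target size.

import torch
from PIL import Image
from transformers import SegformerForSemanticSegmentation, SegformerImageProcessor

ckpt = "nvidia/segformer-b0-finetuned-ade-512-512"
processor = SegformerImageProcessor.from_pretrained(ckpt)
model = SegformerForSemanticSegmentation.from_pretrained(ckpt)

image = Image.open("scene.jpg").convert("RGB")  # placeholder input
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)  # logits shape: (1, num_labels, H/4, W/4)

# upsample and argmax back to the original resolution; PIL size is (W, H), hence the [::-1]
seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(seg_map.shape)  # torch.Size([H, W]) of per-pixel class ids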
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict: lowerCamelCase : Dict = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = image_size lowerCamelCase : Optional[int] = patch_size lowerCamelCase : Any = num_channels lowerCamelCase : Any = embed_dim lowerCamelCase : Dict = hidden_sizes lowerCamelCase : List[Any] = depths lowerCamelCase : Tuple = num_heads lowerCamelCase : List[Any] = window_size lowerCamelCase : str = mlp_ratio lowerCamelCase : str = qkv_bias lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Tuple = drop_path_rate lowerCamelCase : Dict = hidden_act lowerCamelCase : Tuple = use_absolute_embeddings lowerCamelCase : List[str] = patch_norm lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : str = initializer_range lowerCamelCase : Tuple = is_training lowerCamelCase : int = scope lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : List[str] = type_sequence_label_size lowerCamelCase : str = encoder_stride lowerCamelCase : List[str] = out_features lowerCamelCase : Optional[int] = out_indices def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : str = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = self.get_config() return config, pixel_values, labels def a__ ( self: List[Any] )-> Optional[int]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Tuple = model(__a ) lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int: lowerCamelCase : List[Any] = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase : Dict = None lowerCamelCase : Dict = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a ) model.to(__a ) model.eval() lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str: lowerCamelCase : Optional[Any] = self.type_sequence_label_size lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : int = 1 lowerCamelCase : List[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self: int )-> Optional[int]: 
lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) snake_case__ : Optional[int] =( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) snake_case__ : Tuple =False snake_case__ : Dict =False snake_case__ : Dict =False snake_case__ : Tuple =False snake_case__ : Optional[int] =False def a__ ( self: Union[str, Any] )-> Optional[int]: lowerCamelCase : List[str] = FocalNetModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a ) def a__ ( self: List[str] )-> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: List[str] )-> Union[str, Any]: return def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[Any] )-> Dict: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: List[Any] )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def a__ ( self: Optional[Any] )-> str: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def a__ ( self: Optional[Any] )-> Dict: pass def a__ ( self: Optional[Any] )-> Dict: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : Any = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : int = model_class(__a ) lowerCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Any = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]: lowerCamelCase : List[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase 
: List[str] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : List[str] = outputs.hidden_states lowerCamelCase : Tuple = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # FocalNet has a different seq_length lowerCamelCase : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape lowerCamelCase : Tuple = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self: Any )-> Any: lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase : List[str] = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : List[Any] = True self.check_hidden_states_output(__a , __a , __a , __a ) def a__ ( self: str )-> Union[str, Any]: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = 3 lowerCamelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase : str = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Union[str, Any] = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) @slow def a__ ( self: Optional[int] )-> List[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> Any: lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = _config_zero_init(__a ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems 
not properly initialized' , ) @require_vision @require_torch class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Optional[int] )-> Optional[Any]: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else () snake_case__ : Optional[int] =FocalNetConfig snake_case__ : str =False def a__ ( self: Union[str, Any] )-> Tuple: lowerCamelCase : str = FocalNetModelTester(self )
42
0
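A minimal sketch of the patch arithmetic the FocalNet hidden-state tests above exercise. The helper names are ours: pad_like_sample mirrors the padding formula in the test, which adds a whole extra patch when a dimension already divides evenly, while ceil_to_multiple is the usual round-up variant.

def ceil_to_multiple(size: int, patch: int) -> int:
    # round `size` up to the next multiple of `patch`
    return ((size + patch - 1) // patch) * patch

def pad_like_sample(size: int, patch: int) -> int:
    # formula used in the test; over-pads when size % patch == 0
    return size + patch - (size % patch)

def num_patches(height: int, width: int, patch: int) -> int:
    return (height // patch) * (width // patch)

assert ceil_to_multiple(30, 4) == 32
assert pad_like_sample(30, 4) == 32
assert pad_like_sample(32, 4) == 36  # the extra-patch corner case
assert num_patches(32, 32, 4) == 64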
"""simple docstring""" import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model') if is_torch_available(): from transformers.models.plbart.modeling_plbart import shift_tokens_right __lowerCamelCase :str = 50_003 __lowerCamelCase :List[Any] = 50_002 @require_sentencepiece @require_tokenizers class A__ ( UpperCamelCase_ , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =PLBartTokenizer snake_case__ : Any =None snake_case__ : List[Any] =False def a__ ( self: int )-> int: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : List[Any] = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase : Tuple = PLBartTokenizer(__A , language_codes="""base""" , keep_accents=__A ) lowerCamelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Optional[int] = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual( __A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) lowerCamelCase : int = tokenizer.vocab_size lowerCamelCase : Tuple = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 4 , __A )] self.assertListEqual(__A , ["""__java__""", """__python__""", """__en_XX__""", """<mask>"""] ) lowerCamelCase : Tuple = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" lowerCamelCase : Dict = tokenizer(__A ).input_ids self.assertEqual( tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , ) def a__ ( self: Dict )-> List[str]: lowerCamelCase : Dict = PLBartTokenizer(__A , language_codes="""multi""" , keep_accents=__A ) lowerCamelCase : Optional[Any] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__A ) , 
[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) lowerCamelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) lowerCamelCase : Dict = tokenizer.convert_tokens_to_ids(__A ) self.assertListEqual( __A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(__A ) self.assertListEqual( __A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) lowerCamelCase : List[Any] = tokenizer.vocab_size lowerCamelCase : Union[str, Any] = [tokenizer.convert_ids_to_tokens(__A ) for x in range(end - 7 , __A )] self.assertListEqual( __A , ["""__java__""", """__python__""", """__en_XX__""", """__javascript__""", """__php__""", """__ruby__""", """__go__"""] ) lowerCamelCase : str = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go" lowerCamelCase : List[Any] = tokenizer(__A ).input_ids self.assertEqual( tokenizer.decode(__A , skip_special_tokens=__A , clean_up_tokenization_spaces=__A ) , __A , ) @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : int ='''uclanlp/plbart-python-en_XX''' snake_case__ : List[Any] =[ '''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''', '''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''', ] snake_case__ : Tuple =[ '''Returns the maximum value of a b c.''', '''Sums the values of a b c.''', ] snake_case__ : Any =[ 1_34, 54_52, 3_34_60, 3_34_41, 3_34_63, 3_34_65, 3_34_63, 3_34_49, 9_88, 20, 3_34_56, 19, 3_34_56, 7_71, 39, 42_58, 8_89, 33_18, 3_34_41, 3_34_63, 3_34_65, 3_34_63, 3_34_49, 24_71, 2, PYTHON_CODE, ] @classmethod def a__ ( cls: Optional[int] )-> List[str]: lowerCamelCase : PLBartTokenizer = PLBartTokenizer.from_pretrained( cls.checkpoint_name , language_codes="""base""" , src_lang="""python""" , tgt_lang="""en_XX""" ) lowerCamelCase : Optional[Any] = 1 return cls def a__ ( self: Optional[Any] )-> List[Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__java__"""] , 50_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__python__"""] , 50_002 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""__en_XX__"""] , 50_003 ) def a__ ( self: Optional[Any] )-> List[str]: lowerCamelCase : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , __A ) def a__ ( self: Optional[Any] )-> Tuple: self.assertIn(__A , self.tokenizer.all_special_ids ) lowerCamelCase : List[str] = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2] lowerCamelCase : List[str] = self.tokenizer.decode(__A , skip_special_tokens=__A ) lowerCamelCase : int = 
self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__A ) self.assertEqual(__A , __A ) self.assertNotIn(self.tokenizer.eos_token , __A ) def a__ ( self: Optional[Any] )-> Optional[Any]: lowerCamelCase : int = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20] self.assertIsInstance(src_text[0] , __A ) lowerCamelCase : Dict = 10 lowerCamelCase : List[Any] = self.tokenizer(__A , max_length=__A , truncation=__A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , __A ) self.assertEqual(len(__A ) , __A ) def a__ ( self: Optional[Any] )-> Dict: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """__java__"""] ) , [50_004, 50_001] ) def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : str = tempfile.mkdtemp() lowerCamelCase : List[Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(__A ) lowerCamelCase : List[Any] = PLBartTokenizer.from_pretrained(__A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __A ) @require_torch def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : Any = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__A , return_tensors="""pt""" ) lowerCamelCase : int = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] ) self.assertEqual(batch.decoder_input_ids[1][0] , __A ) self.assertEqual(batch.decoder_input_ids[1][-1] , 2 ) self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] ) @require_torch def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Optional[int] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=__A , truncation=__A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) lowerCamelCase : Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(__A , __A ) self.assertEqual((2, 26) , batch.input_ids.shape ) self.assertEqual((2, 26) , batch.attention_mask.shape ) lowerCamelCase : Optional[int] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , __A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] ) def a__ ( self: Any )-> List[str]: lowerCamelCase : List[Any] = self.tokenizer(self.src_text , padding=__A , truncation=__A , max_length=3 , return_tensors="""pt""" ) lowerCamelCase : str = self.tokenizer( text_target=self.tgt_text , padding=__A , truncation=__A , max_length=10 , return_tensors="""pt""" ) lowerCamelCase : int = targets["input_ids"] lowerCamelCase : Tuple = shift_tokens_right(__A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase : str = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""java""" ) self.assertEqual( nested_simplify(__A ) , { # A, test, EOS, en_XX """input_ids""": [[150, 242, 2, 50_003]], """attention_mask""": [[1, 1, 1, 1]], # java """forced_bos_token_id""": 50_001, } , )
703
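The PLBart batch tests above depend on shift_tokens_right, which moves the trailing language-code token to position 0 of the decoder input. Below is a hedged, list-based paraphrase of the wrap-around behavior those assertions imply, assuming right-padded sequences; it is our reading of the tests, not the library implementation.

def shift_tokens_right_sketch(input_ids: list[int], pad_token_id: int) -> list[int]:
    # the last non-pad token is the language code in *BART-style labels
    num_non_pad = sum(1 for t in input_ids if t != pad_token_id)
    decoder_start = input_ids[num_non_pad - 1]
    return [decoder_start] + input_ids[:-1]

PAD = 1
print(shift_tokens_right_sketch([87, 65, 2, 50_003], PAD))
# [50003, 87, 65, 2]: the language code leads, everything else shifts right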
"""simple docstring""" import os def snake_case ( ) -> Optional[Any]: with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f: lowerCamelCase : int = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase__ ) for x in f.readline().split()] ) lowerCamelCase : Union[str, Any] = 0 # right for i in range(20 ): for j in range(17 ): lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowerCamelCase : Tuple = temp # down for i in range(17 ): for j in range(20 ): lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowerCamelCase : Optional[Any] = temp # diagonal 1 for i in range(17 ): for j in range(17 ): lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowerCamelCase : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowerCamelCase : List[Any] = temp return maximum if __name__ == "__main__": print(solution())
42
0
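The greatest-product scan above unrolls one loop nest per direction. The same search can be written once over direction vectors; this sketch uses a toy grid of ours rather than the puzzle's 20x20 input.

def max_run_product(grid: list[list[int]], run: int = 4) -> int:
    rows, cols = len(grid), len(grid[0])
    best = 0
    # right, down, down-right diagonal, down-left diagonal
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(rows):
            for j in range(cols):
                ei, ej = i + (run - 1) * di, j + (run - 1) * dj
                if 0 <= ei < rows and 0 <= ej < cols:
                    prod = 1
                    for k in range(run):
                        prod *= grid[i + k * di][j + k * dj]
                    best = max(best, prod)
    return best

print(max_run_product([[1, 2, 3], [4, 5, 6], [7, 8, 9]], run=3))  # 504 (7 * 8 * 9)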
"""simple docstring""" from random import randint from tempfile import TemporaryFile import numpy as np def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]: lowerCamelCase : Optional[Any] = 0 if start < end: lowerCamelCase : Union[str, Any] = randint(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase : str = a[end] lowerCamelCase : List[str] = a[pivot] lowerCamelCase : Tuple = temp lowerCamelCase : int = _in_place_partition(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) count += _in_place_quick_sort(_lowerCamelCase , _lowerCamelCase , p - 1 ) count += _in_place_quick_sort(_lowerCamelCase , p + 1 , _lowerCamelCase ) return count def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] ) -> str: lowerCamelCase : Tuple = 0 lowerCamelCase : Optional[int] = randint(_lowerCamelCase , _lowerCamelCase ) lowerCamelCase : List[Any] = a[end] lowerCamelCase : Optional[int] = a[pivot] lowerCamelCase : List[Any] = temp lowerCamelCase : Union[str, Any] = start - 1 for index in range(_lowerCamelCase , _lowerCamelCase ): count += 1 if a[index] < a[end]: # check if current val is less than pivot value lowerCamelCase : Optional[int] = new_pivot_index + 1 lowerCamelCase : Optional[Any] = a[new_pivot_index] lowerCamelCase : Tuple = a[index] lowerCamelCase : Any = temp lowerCamelCase : List[Any] = a[new_pivot_index + 1] lowerCamelCase : Dict = a[end] lowerCamelCase : Optional[Any] = temp return new_pivot_index + 1, count __lowerCamelCase :str = TemporaryFile() __lowerCamelCase :List[Any] = 100 # 1000 elements are to be sorted __lowerCamelCase :Dict = 0, 1 # mean and standard deviation __lowerCamelCase :str = np.random.normal(mu, sigma, p) np.save(outfile, X) print('The array is') print(X) outfile.seek(0) # using the same array __lowerCamelCase :Tuple = np.load(outfile) __lowerCamelCase :int = len(M) - 1 __lowerCamelCase :Tuple = _in_place_quick_sort(M, 0, r) print( 'No of Comparisons for 100 elements selected from a standard normal distribution' 'is :' ) print(z)
704
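A de-obfuscated, runnable sketch of the counting quicksort above: a random pivot is swapped to the end, a Lomuto-style partition tallies comparisons, and the tallies from both recursive halves are summed. Names are ours.

import random

def quick_sort_counting(a: list, start: int, end: int) -> int:
    count = 0
    if start < end:
        pivot = random.randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = partition_counting(a, start, end)
        count += quick_sort_counting(a, start, p - 1)
        count += quick_sort_counting(a, p + 1, end)
    return count

def partition_counting(a: list, start: int, end: int) -> tuple[int, int]:
    count = 0
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # current value belongs left of the pivot
            new_pivot_index += 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count

data = [5, 1, 4, 2, 3]
comparisons = quick_sort_counting(data, 0, len(data) - 1)
print(data, comparisons)  # [1, 2, 3, 4, 5] plus the comparison count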
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Any = False @skip_mps class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline snake_case__ : Any =False snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Dict )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Union[str, Any] )-> Any: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: Tuple )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : Optional[int] = CLIPTextModel(__a ) lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]: if str(__a ).startswith("""mps""" ): lowerCamelCase : Tuple = torch.manual_seed(__a ) else: lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : Dict = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: Dict )-> str: lowerCamelCase : Tuple = """cpu""" lowerCamelCase : List[str] = self.get_dummy_components() lowerCamelCase : List[Any] = self.pipeline_class(**__a 
) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Any = self.get_dummy_inputs(__a ) lowerCamelCase : Union[str, Any] = pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def a__ ( self: int )-> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Union[str, Any] )-> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> int: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: Dict )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: Optional[int] )-> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Tuple: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: str )-> str: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: Any )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Dict )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: int )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = torch.manual_seed(51 ) lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : Dict = """a painting of an elephant with glasses""" lowerCamelCase : Any = [5, 7] lowerCamelCase : Tuple = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
42
0
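The diffusion-pipeline tests above all close with the same numeric check: flatten a 3x3 corner slice of the generated image and bound its distance to stored reference values. A stand-alone version with made-up arrays:

import numpy as np

image = np.zeros((1, 64, 64, 3), dtype=np.float32)  # stand-in for pipe(...).images
image_slice = image[0, -3:, -3:, -1]                # bottom-right 3x3 of the last channel
expected_slice = np.zeros(9, dtype=np.float32)      # reference values would go here
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
assert max_diff <= 1e-3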
"""simple docstring""" def snake_case ( UpperCamelCase__ : int ) -> Dict: lowerCamelCase : int = generate_pascal_triangle(UpperCamelCase__ ) for row_idx in range(UpperCamelCase__ ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=""" """ ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=""" """ ) else: print(triangle[row_idx][col_idx] , end="""""" ) print() def snake_case ( UpperCamelCase__ : int ) -> Optional[int]: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) lowerCamelCase : list[list[int]] = [] for current_row_idx in range(UpperCamelCase__ ): lowerCamelCase : Dict = populate_current_row(UpperCamelCase__ , UpperCamelCase__ ) triangle.append(UpperCamelCase__ ) return triangle def snake_case ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int ) -> Union[str, Any]: lowerCamelCase : List[Any] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 lowerCamelCase : Any = 1, 1 for current_col_idx in range(1 , UpperCamelCase__ ): calculate_current_element( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return current_row def snake_case ( UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , ) -> str: lowerCamelCase : int = triangle[current_row_idx - 1][current_col_idx - 1] lowerCamelCase : int = triangle[current_row_idx - 1][current_col_idx] lowerCamelCase : int = above_to_left_elt + above_to_right_elt def snake_case ( UpperCamelCase__ : int ) -> Tuple: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise TypeError("""The input value of 'num_rows' should be 'int'""" ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( """The input value of 'num_rows' should be greater than or equal to 0""" ) lowerCamelCase : list[list[int]] = [[1]] for row_index in range(1 , UpperCamelCase__ ): lowerCamelCase : Dict = [0] + result[-1] + [0] lowerCamelCase : Any = row_index + 1 # Calculate the number of distinct elements in a row lowerCamelCase : Optional[Any] = sum(divmod(UpperCamelCase__ , 2 ) ) lowerCamelCase : Tuple = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] lowerCamelCase : str = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() lowerCamelCase : Optional[int] = row_first_half + row_second_half result.append(UpperCamelCase__ ) return result def snake_case ( ) -> List[Any]: from collections.abc import Callable from timeit import timeit def benchmark_a_function(UpperCamelCase__ : Callable , UpperCamelCase__ : int ) -> None: lowerCamelCase : Union[str, Any] = F'{func.__name__}({value})' lowerCamelCase : str = timeit(F'__main__.{call}' , setup="""import __main__""" ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'{call:38} -- {timing:.4f} seconds' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(UpperCamelCase__ , UpperCamelCase__ ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
705
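The padded-row trick behind the optimized Pascal generator above, in compact runnable form. This version skips the sample's half-row symmetry shortcut for brevity; names are ours.

def pascal_rows(num_rows: int) -> list[list[int]]:
    if num_rows <= 0:
        return []
    rows = [[1]]
    for _ in range(1, num_rows):
        padded = [0] + rows[-1] + [0]  # e.g. [0, 1, 2, 1, 0]
        rows.append([padded[i] + padded[i + 1] for i in range(len(padded) - 1)])
    return rows

for row in pascal_rows(5):
    print(row)  # last row printed: [1, 4, 6, 4, 1]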
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A__ : """simple docstring""" def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : Any = seq_length lowerCamelCase : Any = is_training lowerCamelCase : Tuple = use_input_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : List[str] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Union[str, Any] = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : Any = max_position_embeddings lowerCamelCase : str = type_vocab_size lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : Union[str, Any] = num_labels lowerCamelCase : Optional[Any] = num_choices lowerCamelCase : Any = scope def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Dict = None if self.use_input_mask: lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Any = None lowerCamelCase : int = None lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self: Tuple )-> Union[str, Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , 
__a: str )-> int: lowerCamelCase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a ) lowerCamelCase : str = model(__a ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int: lowerCamelCase : str = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]: lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Dict = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Tuple = config_and_inputs lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Any =False snake_case__ : Dict =( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Dict =() snake_case__ : Optional[int] =( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Any =True def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Optional[Any] = EsmModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 ) def a__ ( self: List[Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: Tuple )-> Any: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase : Tuple = type self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def a__ ( self: Any )-> List[Any]: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> List[str]: lowerCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a ) lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase : Union[str, Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Any = EsmEmbeddings(config=__a ) lowerCamelCase : Dict = torch.empty(2 , 4 , 30 ) lowerCamelCase : List[Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Any )-> Optional[Any]: pass @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Dict )-> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: List[str] )-> Dict: pass @require_torch class A__ ( __lowercase): """simple docstring""" @slow def a__ ( self: Any )-> Union[str, Any]: with torch.no_grad(): lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Tuple = model(__a )[0] lowerCamelCase : Dict = 33 lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) lowerCamelCase : Tuple = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Dict )-> str: with torch.no_grad(): lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Any = model(__a )[0] # compare the actual values for a slice. lowerCamelCase : Tuple = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
42
0
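A list-based paraphrase of the position-id rule the ESM embedding test above asserts: padding positions keep padding_idx, and real tokens count upward from padding_idx + 1. This is our reading of the expected tensor in the test, not the library function.

def position_ids_sketch(input_ids: list[int], padding_idx: int) -> list[int]:
    position_ids, seen = [], 0
    for token in input_ids:
        if token == padding_idx:
            position_ids.append(padding_idx)
        else:
            position_ids.append(padding_idx + 1 + seen)
            seen += 1
    return position_ids

pad = 1
print(position_ids_sketch([12, 31, 13, pad], pad))  # [2, 3, 4, 1]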
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class A__ ( __a): """simple docstring""" def __init__( self: Dict )-> List[str]: self.test() def a__ ( self: Optional[Any] )-> Dict: lowerCamelCase : Tuple = 0 lowerCamelCase : List[Any] = False while not completed: if counter == 1: self.reset() lowerCamelCase : List[str] = self.advance() if not self.does_advance(a_ ): raise Exception( """Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.""" ) lowerCamelCase : Union[str, Any] = self.update(a_ ) counter += 1 if counter > 10_000: raise Exception("""update() does not fulfill the constraint.""" ) if self.remaining() != 0: raise Exception("""Custom Constraint is not defined correctly.""" ) @abstractmethod def a__ ( self: Optional[int] )-> Union[str, Any]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a__ ( self: Optional[int] , __a: int )-> Union[str, Any]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a__ ( self: Optional[int] , __a: int )-> List[str]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a__ ( self: Tuple )-> Tuple: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a__ ( self: int )-> int: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def a__ ( self: List[str] , __a: Tuple=False )-> str: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class A__ ( __a): """simple docstring""" def __init__( self: Tuple , __a: List[int] )-> List[str]: super(a_ , self ).__init__() if not isinstance(a_ , a_ ) or len(a_ ) == 0: raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(a_ , a_ ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' ) lowerCamelCase : Optional[int] = token_ids lowerCamelCase : Dict = len(self.token_ids ) lowerCamelCase : Union[str, Any] = -1 # the index of the currently fulfilled step lowerCamelCase : str = False def a__ ( self: Optional[int] )-> List[str]: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def a__ ( self: List[Any] , __a: int )-> Optional[Any]: if not isinstance(a_ , a_ ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def a__ ( self: List[str] , __a: int )-> Optional[int]: if not isinstance(a_ , a_ ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(a_ )}' ) lowerCamelCase : Optional[int] = False lowerCamelCase : List[str] = False lowerCamelCase : List[Any] = False if self.does_advance(a_ ): self.fulfilled_idx += 1 lowerCamelCase : List[str] = True if self.fulfilled_idx == (self.seqlen - 1): lowerCamelCase : str = True lowerCamelCase : Optional[int] = completed else: # failed to make progress. 
lowerCamelCase : Union[str, Any] = True self.reset() return stepped, completed, reset def a__ ( self: Union[str, Any] )-> Optional[int]: lowerCamelCase : int = False lowerCamelCase : Dict = 0 def a__ ( self: Tuple )-> Optional[int]: return self.seqlen - (self.fulfilled_idx + 1) def a__ ( self: Union[str, Any] , __a: str=False )-> Union[str, Any]: lowerCamelCase : Dict = PhrasalConstraint(self.token_ids ) if stateful: lowerCamelCase : List[Any] = self.seqlen lowerCamelCase : str = self.fulfilled_idx lowerCamelCase : Union[str, Any] = self.completed return new_constraint class A__ : """simple docstring""" def __init__( self: int , __a: List[List[int]] , __a: int=True )-> Dict: lowerCamelCase : List[str] = max([len(a_ ) for one in nested_token_ids] ) lowerCamelCase : int = {} for token_ids in nested_token_ids: lowerCamelCase : List[str] = root for tidx, token_id in enumerate(a_ ): if token_id not in level: lowerCamelCase : int = {} lowerCamelCase : List[Any] = level[token_id] if no_subsets and self.has_subsets(a_ , a_ ): raise ValueError( """Each list in `nested_token_ids` can't be a complete subset of another list, but is""" f' {nested_token_ids}.' ) lowerCamelCase : int = root def a__ ( self: Tuple , __a: Tuple )-> Dict: lowerCamelCase : Union[str, Any] = self.trie for current_token in current_seq: lowerCamelCase : Any = start[current_token] lowerCamelCase : Any = list(start.keys() ) return next_tokens def a__ ( self: List[Any] , __a: Optional[int] )-> List[Any]: lowerCamelCase : Any = self.next_tokens(a_ ) return len(a_ ) == 0 def a__ ( self: Dict , __a: List[Any] )-> Any: lowerCamelCase : int = list(root.values() ) if len(a_ ) == 0: return 1 else: return sum([self.count_leaves(a_ ) for nn in next_nodes] ) def a__ ( self: str , __a: List[Any] , __a: int )-> int: lowerCamelCase : str = self.count_leaves(a_ ) return len(a_ ) != leaf_count class A__ ( __a): """simple docstring""" def __init__( self: Tuple , __a: List[List[int]] )-> int: super(a_ , self ).__init__() if not isinstance(a_ , a_ ) or len(a_ ) == 0: raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' ) if any(not isinstance(a_ , a_ ) for token_ids in nested_token_ids ): raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(a_ , a_ ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' 
) lowerCamelCase : Optional[Any] = DisjunctiveTrie(a_ ) lowerCamelCase : Optional[int] = nested_token_ids lowerCamelCase : Tuple = self.trie.max_height lowerCamelCase : List[str] = [] lowerCamelCase : Union[str, Any] = False def a__ ( self: Any )-> Any: lowerCamelCase : List[Any] = self.trie.next_tokens(self.current_seq ) if len(a_ ) == 0: return None else: return token_list def a__ ( self: Tuple , __a: int )-> List[str]: if not isinstance(a_ , a_ ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}' ) lowerCamelCase : Optional[Any] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def a__ ( self: Any , __a: int )-> List[Any]: if not isinstance(a_ , a_ ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(a_ )}' ) lowerCamelCase : Any = False lowerCamelCase : Union[str, Any] = False lowerCamelCase : Tuple = False if self.does_advance(a_ ): self.current_seq.append(a_ ) lowerCamelCase : Optional[Any] = True else: lowerCamelCase : Tuple = True self.reset() lowerCamelCase : Any = self.trie.reached_leaf(self.current_seq ) lowerCamelCase : Tuple = completed return stepped, completed, reset def a__ ( self: str )-> int: lowerCamelCase : Any = False lowerCamelCase : Tuple = [] def a__ ( self: str )-> Union[str, Any]: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def a__ ( self: Dict , __a: List[Any]=False )-> List[str]: lowerCamelCase : Union[str, Any] = DisjunctiveConstraint(self.token_ids ) if stateful: lowerCamelCase : Tuple = self.seqlen lowerCamelCase : List[str] = self.current_seq lowerCamelCase : List[Any] = self.completed return new_constraint class A__ : """simple docstring""" def __init__( self: Any , __a: List[Constraint] )-> Optional[Any]: lowerCamelCase : Any = constraints # max # of steps required to fulfill a given constraint lowerCamelCase : Optional[int] = max([c.seqlen for c in constraints] ) lowerCamelCase : Optional[Any] = len(a_ ) lowerCamelCase : List[Any] = False self.init_state() def a__ ( self: Optional[int] )-> Optional[Any]: lowerCamelCase : List[Any] = [] lowerCamelCase : Union[str, Any] = None lowerCamelCase : Optional[int] = [constraint.copy(stateful=a_ ) for constraint in self.constraints] def a__ ( self: Union[str, Any] )-> List[str]: lowerCamelCase : Any = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def a__ ( self: Optional[int] )-> Any: lowerCamelCase : Union[str, Any] = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" lowerCamelCase : str = constraint.advance() if isinstance(a_ , a_ ): token_list.append(a_ ) elif isinstance(a_ , a_ ): token_list.extend(a_ ) else: lowerCamelCase : List[str] = self.inprogress_constraint.advance() if isinstance(a_ , a_ ): token_list.append(a_ ) elif isinstance(a_ , a_ ): token_list.extend(a_ ) if len(a_ ) == 0: return None else: return token_list def a__ ( self: int , __a: Optional[List[int]] )-> List[Any]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint lowerCamelCase : int = self.add(a_ ) # the entire list of constraints are fulfilled if self.completed: break def a__ ( self: List[Any] , __a: int )-> int: if not isinstance(a_ , a_ ): 
raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' ) lowerCamelCase : List[str] = False, False if self.completed: lowerCamelCase : Dict = True lowerCamelCase : List[Any] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state lowerCamelCase : Optional[int] = self.inprogress_constraint.update(a_ ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=a_ ) ) lowerCamelCase : Union[str, Any] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) lowerCamelCase : Dict = None if len(self.pending_constraints ) == 0: # we're done! lowerCamelCase : Union[str, Any] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(a_ ): lowerCamelCase : Any = pending_constraint.update(a_ ) if not stepped: raise Exception( """`constraint.update(token_id)` is not yielding incremental progress, """ """even though `constraint.does_advance(token_id)` is true.""" ) if complete: self.complete_constraints.append(a_ ) lowerCamelCase : Optional[Any] = None if not complete and stepped: lowerCamelCase : List[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". lowerCamelCase : Any = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. lowerCamelCase : List[str] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def a__ ( self: Dict , __a: int=True )-> Optional[int]: lowerCamelCase : Dict = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: lowerCamelCase : List[Any] = [ constraint.copy(stateful=a_ ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: lowerCamelCase : Dict = self.inprogress_constraint.copy(stateful=a_ ) lowerCamelCase : List[str] = [constraint.copy() for constraint in self.pending_constraints] return new_state
706
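The disjunctive constraint above is driven by a token trie. A minimal re-creation with our own names: it stores several candidate token sequences and answers which token ids may legally come next after a given prefix.

def build_trie(sequences: list[list[int]]) -> dict:
    root: dict = {}
    for seq in sequences:
        level = root
        for token_id in seq:
            level = level.setdefault(token_id, {})
    return root

def next_tokens(trie: dict, prefix: list[int]) -> list[int]:
    level = trie
    for token_id in prefix:
        level = level[token_id]
    return list(level.keys())

trie = build_trie([[5, 7, 9], [5, 8]])
print(next_tokens(trie, [5]))        # [7, 8]
print(next_tokens(trie, [5, 7]))     # [9]
print(next_tokens(trie, [5, 7, 9]))  # [] -> a reached leaf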
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :str = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =AlbertTokenizer snake_case__ : Optional[Any] =AlbertTokenizerFast snake_case__ : Optional[int] =True snake_case__ : Any =True snake_case__ : Optional[int] =True def a__ ( self: Dict )-> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : int = AlbertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]: lowerCamelCase : List[str] = """this is a test""" lowerCamelCase : int = """this is a test""" return input_text, output_text def a__ ( self: Any )-> List[Any]: lowerCamelCase : int = """<pad>""" lowerCamelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def a__ ( self: Tuple )-> str: lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(__a ) , 30_000 ) def a__ ( self: List[str] )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def a__ ( self: Optional[Any] )-> Union[str, Any]: if not self.test_rust_tokenizer: return lowerCamelCase : str = self.get_tokenizer() lowerCamelCase : Tuple = self.get_rust_tokenizer() lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : List[str] = tokenizer.tokenize(__a ) lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Any = self.get_rust_tokenizer() lowerCamelCase : List[str] = tokenizer.encode(__a ) lowerCamelCase : str = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def a__ ( self: Tuple )-> List[Any]: lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a ) lowerCamelCase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] ) lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def a__ ( self: Tuple )-> str: lowerCamelCase : str = AlbertTokenizer(__a ) lowerCamelCase : Union[str, Any] = 
tokenizer.encode("""sequence builders""" ) lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" ) lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def a__ ( self: Any )-> Dict: # fmt: off lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
42
0
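The shape of the special-token layout the ALBERT test above asserts: a single sequence becomes [CLS] A [SEP], and a pair becomes [CLS] A [SEP] B [SEP]. Toy token ids of ours, not the fixture's:

CLS, SEP = 2, 3  # hypothetical ids

def build_inputs(a: list[int], b: list[int] | None = None) -> list[int]:
    out = [CLS] + a + [SEP]
    if b is not None:
        out += b + [SEP]
    return out

print(build_inputs([10, 11]))        # [2, 10, 11, 3]
print(build_inputs([10, 11], [12]))  # [2, 10, 11, 3, 12, 3]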
from __future__ import annotations from math import pow, sqrt def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] ) -> dict[str, float]: if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError("""One and only one argument must be 0""" ) if resistance == 0: return {"resistance": sqrt(pow(__snake_case , 2 ) - pow(__snake_case , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(__snake_case , 2 ) - pow(__snake_case , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(__snake_case , 2 ) + pow(__snake_case , 2 ) )} else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
707
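A quick numeric check of the impedance-triangle relation Z**2 == R**2 + X**2 that the solver above inverts; the values are arbitrary.

from math import sqrt

R, X = 3.0, 4.0
Z = sqrt(R**2 + X**2)
print(Z)                  # 5.0
print(sqrt(Z**2 - X**2))  # recovers R = 3.0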
"""simple docstring""" __lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : Tuple = True lowerCamelCase : Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) order.append(UpperCamelCase__ ) return order def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : List[Any] = True lowerCamelCase : int = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return component def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]: lowerCamelCase : int = len(UpperCamelCase__ ) * [False] lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(UpperCamelCase__ ) lowerCamelCase : int = [] for i, was_visited in enumerate(UpperCamelCase__ ): if not was_visited: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Tuple = [] lowerCamelCase : str = len(UpperCamelCase__ ) * [False] for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1] if not visited[vert]: lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) components_list.append(UpperCamelCase__ ) return components_list
42
0
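A compact driver for the Kosaraju-style pass above: one DFS records finish order, and a second DFS over the reversed graph peels off the strongly connected components. Graph literal and names are ours.

def kosaraju(graph: dict[int, list[int]]) -> list[list[int]]:
    reversed_graph: dict[int, list[int]] = {v: [] for v in graph}
    for v, outs in graph.items():
        for w in outs:
            reversed_graph[w].append(v)

    visited, order = {v: False for v in graph}, []

    def dfs(v):
        visited[v] = True
        for w in graph[v]:
            if not visited[w]:
                dfs(w)
        order.append(v)  # postorder: v finishes after its descendants

    for v in graph:
        if not visited[v]:
            dfs(v)

    visited = {v: False for v in graph}
    components = []

    def collect(v, comp):
        visited[v] = True
        comp.append(v)
        for w in reversed_graph[v]:
            if not visited[w]:
                collect(w, comp)

    for v in reversed(order):
        if not visited[v]:
            comp: list[int] = []
            collect(v, comp)
            components.append(comp)
    return components

print(kosaraju({0: [1], 1: [2], 2: [0, 3], 3: [4], 4: []}))
# [[0, 2, 1], [3], [4]]: 0, 1, 2 form the only non-trivial cycle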
"""simple docstring""" from __future__ import annotations def lowerCAmelCase ( UpperCamelCase__ : list[float] ) -> float: lowerCamelCase : Union[str, Any] = 0.0_0 lowerCamelCase : Optional[int] = 0 for resistor in resistors: if resistor <= 0: lowerCamelCase : Dict = F'Resistor at index {index} has a negative or zero value!' raise ValueError(UpperCAmelCase__ ) first_sum += 1 / float(UpperCAmelCase__ ) index += 1 return 1 / first_sum def lowerCAmelCase ( UpperCamelCase__ : list[float] ) -> float: lowerCamelCase : int = 0.0_0 lowerCamelCase : Union[str, Any] = 0 for resistor in resistors: sum_r += resistor if resistor < 0: lowerCamelCase : List[str] = F'Resistor at index {index} has a negative value!' raise ValueError(UpperCAmelCase__ ) index += 1 return sum_r if __name__ == "__main__": import doctest doctest.testmod()
708
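A numeric check of the two resistor rules above: series resistance adds directly, while parallel resistance combines through reciprocals. Values are arbitrary.

def series(resistors: list[float]) -> float:
    return sum(resistors)

def parallel(resistors: list[float]) -> float:
    return 1.0 / sum(1.0 / r for r in resistors)

print(series([2.0, 3.0, 5.0]))  # 10.0
print(parallel([4.0, 4.0]))     # 2.0: two equal resistors halve the resistance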
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :str = logging.get_logger(__name__) __lowerCamelCase :Any = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( __lowercase): """simple docstring""" snake_case__ : List[Any] ='''time_series_transformer''' snake_case__ : List[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any: # time series specific configuration lowerCamelCase : str = prediction_length lowerCamelCase : Optional[Any] = context_length or prediction_length lowerCamelCase : Tuple = distribution_output lowerCamelCase : Any = loss lowerCamelCase : List[Any] = input_size lowerCamelCase : int = num_time_features lowerCamelCase : Dict = lags_sequence lowerCamelCase : Optional[int] = scaling lowerCamelCase : int = num_dynamic_real_features lowerCamelCase : Tuple = num_static_real_features lowerCamelCase : Any = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : int = cardinality else: lowerCamelCase : Dict = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : str = embedding_dimension else: lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase : Any = num_parallel_samples # Transformer architecture configuration lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features lowerCamelCase : List[str] = d_model lowerCamelCase : Tuple = encoder_attention_heads lowerCamelCase : Optional[int] = decoder_attention_heads lowerCamelCase : Union[str, Any] = encoder_ffn_dim lowerCamelCase : str = decoder_ffn_dim lowerCamelCase : str = encoder_layers lowerCamelCase : Any = decoder_layers lowerCamelCase : Optional[int] = dropout lowerCamelCase : List[str] = attention_dropout lowerCamelCase : Tuple = activation_dropout lowerCamelCase : Optional[int] = encoder_layerdrop lowerCamelCase : int = decoder_layerdrop lowerCamelCase : Optional[int] = activation_function lowerCamelCase : Optional[Any] = init_std lowerCamelCase : Optional[Any] = use_cache super().__init__(is_encoder_decoder=__a , **__a ) @property def a__ ( self: int )-> int: return 
( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
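For orientation, a minimal sketch of instantiating this configuration through the public transformers API. The parameter values are illustrative, and the `feature_size` attribute name is assumed from the upstream source (the assignment target is mangled above):

from transformers import TimeSeriesTransformerConfig

# One static categorical feature (cardinality 5, embedded in 3 dims),
# two time features, and three lags; everything else keeps its default.
config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    lags_sequence=[1, 2, 3],
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[5],
    embedding_dimension=[3],
)

# feature_size = input_size * len(lags_sequence) + _number_of_features
#              = 1 * 3 + (3 + 0 + 2 + 0 + 1 * 2) = 10   (attribute name assumed)
print(config.feature_size)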
42
0
"""simple docstring""" import argparse import os from accelerate.test_utils import execute_subprocess_async def snake_case ( UpperCamelCase__ : str=None ) -> Optional[Any]: if subparsers is not None: lowerCamelCase : Dict = subparsers.add_parser("""test""" ) else: lowerCamelCase : Tuple = argparse.ArgumentParser("""Accelerate test command""" ) parser.add_argument( """--config_file""" , default=_lowercase , help=( """The path to use to store the config file. Will default to a file named default_config.yaml in the cache """ """location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have """ """such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed """ """with 'huggingface'.""" ) , ) if subparsers is not None: parser.set_defaults(func=_lowercase ) return parser def snake_case ( UpperCamelCase__ : str ) -> str: lowerCamelCase : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["""test_utils""", """scripts""", """test_script.py"""] ) if args.config_file is None: lowerCamelCase : int = script_name else: lowerCamelCase : int = F'--config_file={args.config_file} {script_name}' lowerCamelCase : Optional[int] = ["accelerate-launch"] + test_args.split() lowerCamelCase : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() ) if result.returncode == 0: print("""Test is a success! You are ready for your distributed training!""" ) def snake_case ( ) -> Tuple: lowerCamelCase : Any = test_command_parser() lowerCamelCase : Union[str, Any] = parser.parse_args() test_command(_lowercase ) if __name__ == "__main__": main()
709
"""simple docstring""" from __future__ import annotations __lowerCamelCase :int = 10 def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]: lowerCamelCase : int = 1 lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ ) while placement <= max_digit: # declare and initialize empty buckets lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCamelCase : Any = int((i / placement) % RADIX ) buckets[tmp].append(UpperCamelCase__ ) # put each buckets' contents into list_of_ints lowerCamelCase : Dict = 0 for b in range(UpperCamelCase__ ): for i in buckets[b]: lowerCamelCase : List[str] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
42
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class A__ ( metaclass=UpperCAmelCase__): """simple docstring""" snake_case__ : Union[str, Any] =['''torch''', '''scipy'''] def __init__( self: Tuple , *__a: Tuple , **__a: Union[str, Any] )-> Optional[Any]: requires_backends(self , ["""torch""", """scipy"""] ) @classmethod def a__ ( cls: Union[str, Any] , *__a: str , **__a: Tuple )-> Optional[Any]: requires_backends(cls , ["""torch""", """scipy"""] ) @classmethod def a__ ( cls: List[Any] , *__a: Any , **__a: Any )-> Any: requires_backends(cls , ["""torch""", """scipy"""] )
710
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Dict = np.asarray(weights[0] ) lowerCamelCase : List[Any] = np.asarray(weights[1] ) lowerCamelCase : List[str] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Tuple = np.asarray(weights[0] ) lowerCamelCase : Any = np.asarray(weights[1] ) lowerCamelCase : List[Any] = np.asarray(weights[2] ) lowerCamelCase : List[str] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]: # layernorm 1 lowerCamelCase : str = weights[0][0][0] lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] ) lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # lsh weights + output lowerCamelCase : List[Any] = weights[0][1] if len(UpperCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) else: set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) # intermediate weighs lowerCamelCase : int = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCamelCase__ ) == 4: lowerCamelCase : Dict = intermediate_weights[2] # layernorm 2 lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] ) lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # intermediate dense lowerCamelCase : 
Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) # intermediate out lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] ) lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]: # reformer model lowerCamelCase : List[Any] = torch_model.reformer # word embeds lowerCamelCase : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , ) if isinstance(weights[3] , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) ) lowerCamelCase : int = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # output layer norm lowerCamelCase : Any = np.asarray(weights[7][0] ) lowerCamelCase : List[str] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # output embeddings lowerCamelCase : List[Any] = np.asarray(weights[9][0] ) lowerCamelCase : Optional[int] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: # Initialise PyTorch model lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ ) with open(UpperCamelCase__ , """rb""" ) as f: lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""] set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCamelCase :Optional[int] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
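The core move in this conversion script is copying a trax NumPy weight into a torch layer with a shape assertion and a layout transpose. A toy round-trip, under the assumption that trax stores linear weights as (in_features, out_features):

import numpy as np
import torch
from torch import nn

def set_param(torch_layer: nn.Linear, weight: np.ndarray) -> None:
    # trax stores (in_features, out_features); nn.Linear expects (out, in).
    tensor = torch.tensor(weight).transpose(0, 1).contiguous()
    assert torch_layer.weight.shape == tensor.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(tensor)

layer = nn.Linear(4, 8, bias=False)
trax_weight = np.random.randn(4, 8).astype(np.float32)
set_param(layer, trax_weight)
print(torch.allclose(layer.weight, torch.tensor(trax_weight).T))  # True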
42
0
"""simple docstring""" import json import os import tempfile import unittest import unittest.mock as mock from pathlib import Path from requests.exceptions import HTTPError from transformers.utils import ( CONFIG_NAME, FLAX_WEIGHTS_NAME, TF2_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, cached_file, get_file_from_repo, has_file, ) __lowerCamelCase :int = 'hf-internal-testing/tiny-random-bert' __lowerCamelCase :Tuple = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert') __lowerCamelCase :List[str] = '9b8c223d42b2188cb49d29af482996f9d0f3e5a6' class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: List[str] )-> Optional[int]: lowerCamelCase : List[str] = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) # Should have downloaded the file in here self.assertTrue(os.path.isdir(UpperCAmelCase_ ) ) # Cache should contain at least those three subfolders: for subfolder in ["blobs", "refs", "snapshots"]: self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) ) ) with open(os.path.join(UpperCAmelCase_ , """refs""" , """main""" ) ) as f: lowerCamelCase : List[str] = f.read() self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , """snapshots""" , UpperCAmelCase_ , UpperCAmelCase_ ) ) self.assertTrue(os.path.isfile(UpperCAmelCase_ ) ) # File is cached at the same place the second time. lowerCamelCase : str = cached_file(UpperCAmelCase_ , UpperCAmelCase_ ) self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ ) # Using a specific revision to test the full commit hash. lowerCamelCase : Dict = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision="""9b8c223""" ) self.assertEqual(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , """snapshots""" , UpperCAmelCase_ , UpperCAmelCase_ ) ) def a__ ( self: Union[str, Any] )-> Optional[int]: with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid model identifier""" ): lowerCamelCase : int = cached_file("""tiny-random-bert""" , UpperCAmelCase_ ) with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid git identifier""" ): lowerCamelCase : Tuple = cached_file(UpperCAmelCase_ , UpperCAmelCase_ , revision="""aaaa""" ) with self.assertRaisesRegex(UpperCAmelCase_ , """does not appear to have a file named""" ): lowerCamelCase : List[str] = cached_file(UpperCAmelCase_ , """conf""" ) def a__ ( self: Union[str, Any] )-> Optional[int]: with self.assertRaisesRegex(UpperCAmelCase_ , """does not appear to have a file named""" ): lowerCamelCase : str = cached_file(UpperCAmelCase_ , """conf""" ) with open(os.path.join(UpperCAmelCase_ , """refs""" , """main""" ) ) as f: lowerCamelCase : Tuple = f.read() self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase_ , """.no_exist""" , UpperCAmelCase_ , """conf""" ) ) ) lowerCamelCase : List[str] = cached_file(UpperCAmelCase_ , """conf""" , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCamelCase : Dict = cached_file(UpperCAmelCase_ , """conf""" , local_files_only=UpperCAmelCase_ , _raise_exceptions_for_missing_entries=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) lowerCamelCase : Tuple = mock.Mock() lowerCamelCase : Any = 500 lowerCamelCase : Dict = {} lowerCamelCase : Optional[int] = HTTPError lowerCamelCase : Union[str, Any] = {} # Under the mock environment we get a 500 error when trying to reach the tokenizer. 
with mock.patch("""requests.Session.request""" , return_value=UpperCAmelCase_ ) as mock_head: lowerCamelCase : str = cached_file(UpperCAmelCase_ , """conf""" , _raise_exceptions_for_connection_errors=UpperCAmelCase_ ) self.assertIsNone(UpperCAmelCase_ ) # This check we did call the fake head request mock_head.assert_called() def a__ ( self: str )-> Any: self.assertTrue(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCAmelCase_ ) ) self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCAmelCase_ ) ) self.assertFalse(has_file("""hf-internal-testing/tiny-bert-pt-only""" , UpperCAmelCase_ ) ) def a__ ( self: int )-> Dict: # `get_file_from_repo` returns None if the file does not exist self.assertIsNone(get_file_from_repo("""bert-base-cased""" , """ahah.txt""" ) ) # The function raises if the repository does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid model identifier""" ): get_file_from_repo("""bert-base-case""" , UpperCAmelCase_ ) # The function raises if the revision does not exist. with self.assertRaisesRegex(UpperCAmelCase_ , """is not a valid git identifier""" ): get_file_from_repo("""bert-base-cased""" , UpperCAmelCase_ , revision="""ahaha""" ) lowerCamelCase : str = get_file_from_repo("""bert-base-cased""" , UpperCAmelCase_ ) # The name is the cached name which is not very easy to test, so instead we load the content. lowerCamelCase : int = json.loads(open(UpperCAmelCase_ , """r""" ).read() ) self.assertEqual(config["""hidden_size"""] , 768 ) def a__ ( self: Optional[Any] )-> str: with tempfile.TemporaryDirectory() as tmp_dir: lowerCamelCase : int = Path(UpperCAmelCase_ ) / """a.txt""" filename.touch() self.assertEqual(get_file_from_repo(UpperCAmelCase_ , """a.txt""" ) , str(UpperCAmelCase_ ) ) self.assertIsNone(get_file_from_repo(UpperCAmelCase_ , """b.txt""" ) )
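The API these tests exercise can also be called directly. A minimal sketch assuming network access; the private `_raise_exceptions_for_missing_entries` flag is taken verbatim from the tests above:

from transformers.utils import cached_file

# Returns the local path of the file inside the cached snapshot.
config_path = cached_file("hf-internal-testing/tiny-random-bert", "config.json")
print(config_path)

# With exceptions suppressed, a missing entry yields None instead of raising.
missing = cached_file(
    "hf-internal-testing/tiny-random-bert",
    "conf",
    _raise_exceptions_for_missing_entries=False,
)
print(missing)  # None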
711
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A__ ( nn.Module): """simple docstring""" def __init__( self: Dict )-> Dict: super().__init__() lowerCamelCase : Tuple = nn.Linear(3 , 4 ) lowerCamelCase : Optional[Any] = nn.BatchNormad(4 ) lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 ) def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple: return (args[0] + 1,) + args[1:], kwargs class A__ ( __lowercase): """simple docstring""" def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]: return output + 1 class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Dict = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Union[str, Any] = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : str = ModelForTest() lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Union[str, Any] = test_model(x + 1 ) lowerCamelCase : Optional[int] = test_model(x + 2 ) lowerCamelCase : List[Any] = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[int] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) assert torch.allclose(__a , __a , atol=1e-5 ) def a__ ( self: Any )-> Optional[int]: lowerCamelCase : str = ModelForTest() lowerCamelCase : List[str] = torch.randn(2 , 3 ) lowerCamelCase : int = test_model(__a ) lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple 
= test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : str = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1e-5 ) def a__ ( self: int )-> Dict: lowerCamelCase : List[Any] = ModelForTest() lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : List[str] = test_model(__a ) lowerCamelCase : Any = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a__ ( self: List[str] )-> Union[str, Any]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowerCamelCase : str = torch.randn(2 , 3 ) lowerCamelCase : Dict = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 ) lowerCamelCase : str = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Optional[Any] = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload lowerCamelCase : Any = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : int = torch.randn(2 , 3 ) lowerCamelCase : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Any )-> List[str]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Optional[Any] )-> List[Any]: lowerCamelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Tuple = torch.randn(2 , 3 ) lowerCamelCase : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
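A compact end-to-end sketch of the hook mechanics these tests cover, using only APIs that appear above:

import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module

class AddOnePreHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        # Shift the first positional input before the wrapped forward runs.
        return (args[0] + 1,) + args[1:], kwargs

model = nn.Linear(3, 3)
x = torch.randn(2, 3)
baseline = model(x + 1)

add_hook_to_module(model, AddOnePreHook())
print(torch.allclose(model(x), baseline, atol=1e-5))  # True

remove_hook_from_module(model)  # restores the original forward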
42
0
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __lowerCamelCase :Dict = logging.get_logger(__name__) __lowerCamelCase :Union[str, Any] = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class A__ ( UpperCamelCase_): """simple docstring""" def __init__( self: Union[str, Any] , __a: Union[str, Any]=None , __a: Optional[int]=None , *__a: Dict , **__a: str )-> List[Any]: super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) if config is None: assert isinstance(self.model , UpperCamelCase__ ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" f' {self.model.__class__}' ) lowerCamelCase : List[Any] = self.model.config else: lowerCamelCase : Union[str, Any] = config lowerCamelCase : List[Any] = data_args lowerCamelCase : str = self.config.tgt_vocab_size if isinstance(self.config , UpperCamelCase__ ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( f'The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for' """ padding..""" ) if self.args.label_smoothing == 0: lowerCamelCase : int = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowerCamelCase : List[str] = label_smoothed_nll_loss def a__ ( self: Any , __a: int )-> Optional[Any]: if self.optimizer is None: lowerCamelCase : str = ['''bias''', '''LayerNorm.weight'''] lowerCamelCase : Optional[int] = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowerCamelCase : int = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowerCamelCase : str = Adafactor lowerCamelCase : Optional[Any] = {'''scale_parameter''': False, '''relative_step''': False} else: lowerCamelCase : Optional[int] = AdamW lowerCamelCase : Any = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowerCamelCase : Dict = self.args.learning_rate if self.sharded_ddp: lowerCamelCase : Any = OSS( params=UpperCamelCase__ , optim=UpperCamelCase__ , **UpperCamelCase__ , ) else: lowerCamelCase : Any = optimizer_cls(UpperCamelCase__ , **UpperCamelCase__ ) if self.lr_scheduler is None: lowerCamelCase : List[Any] = self._get_lr_scheduler(UpperCamelCase__ ) else: # ignoring --lr_scheduler logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" ) def a__ ( self: Union[str, Any] , __a: List[Any] )-> Optional[int]: lowerCamelCase : Union[str, Any] = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowerCamelCase : List[str] = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowerCamelCase : Optional[Any] = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowerCamelCase : str = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=UpperCamelCase__ ) return scheduler def a__ ( self: Any )-> Optional[Any]: if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def a__ ( self: Union[str, Any] , __a: Tuple , __a: Union[str, Any] , __a: List[str] )-> str: if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowerCamelCase : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0] lowerCamelCase : List[str] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowerCamelCase : Union[str, Any] = model(**UpperCamelCase__ , labels=UpperCamelCase__ , use_cache=UpperCamelCase__ )[:2] else: # compute label smoothed loss lowerCamelCase : List[str] = model(**UpperCamelCase__ , use_cache=UpperCamelCase__ )[0] lowerCamelCase : int = torch.nn.functional.log_softmax(UpperCamelCase__ , dim=-1 ) lowerCamelCase : int = 
self.loss_fn(UpperCamelCase__ , UpperCamelCase__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def a__ ( self: str , __a: Dict , __a: Union[str, Any] )-> List[Any]: lowerCamelCase : List[str] = inputs.pop("""labels""" ) lowerCamelCase : List[Any] = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return loss def a__ ( self: str , __a: nn.Module , __a: Dict[str, Union[torch.Tensor, Any]] , __a: bool , __a: Optional[List[str]] = None , )-> Union[str, Any]: lowerCamelCase : Union[str, Any] = self._prepare_inputs(UpperCamelCase__ ) lowerCamelCase : Dict = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowerCamelCase : Optional[Any] = self.model.generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **UpperCamelCase__ , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowerCamelCase : Optional[Any] = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs["""max_length"""] ) lowerCamelCase : Tuple = inputs.pop("""labels""" ) with torch.no_grad(): # compute loss on predict data lowerCamelCase : int = self._compute_loss(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Optional[Any] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowerCamelCase : int = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowerCamelCase : Dict = self._pad_tensors_to_max_len(UpperCamelCase__ , gen_kwargs["""max_length"""] ) return (loss, logits, labels) def a__ ( self: List[str] , __a: Optional[int] , __a: Optional[int] )-> int: lowerCamelCase : Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( """Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be""" f' padded to `max_length`={max_length}' ) lowerCamelCase : List[Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowerCamelCase : Tuple = tensor return padded_tensor
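The padding helper that closes this trainer is easy to exercise in isolation. A standalone sketch with my own function name:

import torch

def pad_tensors_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    # Fill with the pad token, then copy the real ids into the left columns.
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded

ids = torch.tensor([[5, 6, 7], [8, 9, 10]])
print(pad_tensors_to_max_len(ids, 5, pad_token_id=0))
# tensor([[ 5,  6,  7,  0,  0],
#         [ 8,  9, 10,  0,  0]])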
712
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCamelCase :Optional[Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Union[str, Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __lowerCamelCase :Any = logging.getLogger(__name__) def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] = None , UpperCamelCase__ : Optional[Any] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = None , UpperCamelCase__ : List[str] = None , UpperCamelCase__ : Tuple = False , ) -> str: lowerCamelCase : List[str] = bnb_quantization_config.load_in_abit lowerCamelCase : str = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( """You have a version of `bitsandbytes` that is not compatible with 8bit quantization,""" """ make sure you have the latest version of `bitsandbytes` installed.""" ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( """You have a version of `bitsandbytes` that is not compatible with 4bit quantization,""" """make sure you have the latest version of `bitsandbytes` installed.""" ) lowerCamelCase : Optional[int] = [] # custom device map if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(device_map.keys() ) > 1: lowerCamelCase : Dict = [key for key, value in device_map.items() if value in ["""disk""", """cpu"""]] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCamelCase : Optional[int] = get_keys_to_not_convert(__UpperCamelCase ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(__UpperCamelCase ) lowerCamelCase : Optional[Any] = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCamelCase : Dict = [] lowerCamelCase : Union[str, Any] = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(__UpperCamelCase ) # compatibility with peft lowerCamelCase : Union[str, Any] = load_in_abit lowerCamelCase : Union[str, Any] = load_in_abit lowerCamelCase : Union[str, Any] = get_parameter_device(__UpperCamelCase ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( """It is not recommended to quantize a loaded model. 
""" """The model should be instantiated under the `init_empty_weights` context manager.""" ) lowerCamelCase : str = replace_with_bnb_layers(__UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase ) # convert param to the right dtype lowerCamelCase : int = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCamelCase : Optional[int] = name.replace(""".weight""" , """""" ).replace(""".bias""" , """""" ) lowerCamelCase : Tuple = getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(__UpperCamelCase ): param.to(__UpperCamelCase ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError("""No GPU found. A GPU is needed for quantization.""" ) logger.info( F'The model device type is {model_device.type}. However, cuda is needed for quantization.' """We move the model to cuda.""" ) return model elif weights_location is None: raise RuntimeError( F'`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ' ) else: with init_empty_weights(): lowerCamelCase : List[Any] = replace_with_bnb_layers( __UpperCamelCase , __UpperCamelCase , modules_to_not_convert=__UpperCamelCase ) lowerCamelCase : Any = get_quantized_model_device_map( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , max_memory=__UpperCamelCase , no_split_module_classes=__UpperCamelCase , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCamelCase : Union[str, Any] = True lowerCamelCase : int = any(x in list(device_map.values() ) for x in ["""cpu""", """disk"""] ) load_checkpoint_in_model( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , dtype=bnb_quantization_config.torch_dtype , offload_folder=__UpperCamelCase , offload_state_dict=__UpperCamelCase , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(__UpperCamelCase , device_map=__UpperCamelCase , offload_dir=__UpperCamelCase ) def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None ) -> Union[str, Any]: if device_map is None: if torch.cuda.is_available(): lowerCamelCase : int = {"""""": torch.cuda.current_device()} else: raise RuntimeError("""No GPU found. 
A GPU is needed for quantization.""" ) logger.info("""The device_map was not initialized.""" """Setting device_map to `{'':torch.cuda.current_device()}`.""" ) if isinstance(__UpperCamelCase , __UpperCamelCase ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( """If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or """ """'sequential'.""" ) lowerCamelCase : Any = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCamelCase : List[str] = {} lowerCamelCase : Any = special_dtypes lowerCamelCase : Optional[Any] = no_split_module_classes lowerCamelCase : Dict = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCamelCase : str = get_balanced_memory( __UpperCamelCase , low_zero=(device_map == """balanced_low_0""") , max_memory=__UpperCamelCase , **__UpperCamelCase , ) lowerCamelCase : Optional[Any] = max_memory lowerCamelCase : int = infer_auto_device_map(__UpperCamelCase , **__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ): # check if don't have any quantized module on the cpu lowerCamelCase : List[str] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCamelCase : Union[str, Any] = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( """ Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. """ ) else: logger.info( """Some modules are are offloaded to the CPU or the disk. 
Note that these modules will be converted to 8-bit""" ) del device_map_without_some_modules return device_map def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=None ) -> List[Any]: if modules_to_not_convert is None: lowerCamelCase : Optional[int] = [] lowerCamelCase , lowerCamelCase : Dict = _replace_with_bnb_layers( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if not has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , ) -> int: lowerCamelCase : Optional[Any] = False for name, module in model.named_children(): if current_key_name is None: lowerCamelCase : int = [] current_key_name.append(__UpperCamelCase ) if isinstance(__UpperCamelCase , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCamelCase : int = """.""".join(__UpperCamelCase ) lowerCamelCase : Optional[int] = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCamelCase : Union[str, Any] = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCamelCase : Union[str, Any] = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__UpperCamelCase , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCamelCase : Dict = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError("""load_in_8bit and load_in_4bit can't be both False""" ) lowerCamelCase : Dict = module.weight.data if module.bias is not None: lowerCamelCase : Tuple = module.bias.data bnb_module.requires_grad_(__UpperCamelCase ) setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) lowerCamelCase : Dict = True if len(list(module.children() ) ) > 0: lowerCamelCase , lowerCamelCase : Tuple = _replace_with_bnb_layers( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) lowerCamelCase : List[str] = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def snake_case ( UpperCamelCase__ : List[Any] ) -> Union[str, Any]: # Create a copy of the model with init_empty_weights(): lowerCamelCase : str = deepcopy(__UpperCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCamelCase : Any = find_tied_parameters(__UpperCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(__UpperCamelCase , __UpperCamelCase ): lowerCamelCase : Tuple = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCamelCase : int = sum(__UpperCamelCase , [] ) 
lowerCamelCase : Optional[Any] = len(__UpperCamelCase ) > 0 # Check if it is a base model lowerCamelCase : int = False if hasattr(__UpperCamelCase , """base_model_prefix""" ): lowerCamelCase : Optional[int] = not hasattr(__UpperCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCamelCase : List[str] = list(model.named_children() ) lowerCamelCase : str = [list_modules[-1][0]] # add last module together with tied weights lowerCamelCase : Tuple = set(__UpperCamelCase ) - set(__UpperCamelCase ) lowerCamelCase : Tuple = list(set(__UpperCamelCase ) ) + list(__UpperCamelCase ) # remove ".weight" from the keys lowerCamelCase : Optional[Any] = [""".weight""", """.bias"""] lowerCamelCase : Tuple = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCamelCase : Any = name.replace(__UpperCamelCase , """""" ) filtered_module_names.append(__UpperCamelCase ) return filtered_module_names def snake_case ( UpperCamelCase__ : Optional[int] ) -> str: for m in model.modules(): if isinstance(__UpperCamelCase , bnb.nn.Linearabit ): return True return False def snake_case ( UpperCamelCase__ : Dict ) -> Optional[int]: return next(parameter.parameters() ).device def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : Tuple ) -> Union[str, Any]: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , 0 , dtype=__UpperCamelCase , value=__UpperCamelCase ) lowerCamelCase : Any = param_name lowerCamelCase : Any = model if "." in tensor_name: lowerCamelCase : Union[str, Any] = tensor_name.split(""".""" ) for split in splits[:-1]: lowerCamelCase : Tuple = getattr(__UpperCamelCase , __UpperCamelCase ) if new_module is None: raise ValueError(F'{module} has no attribute {split}.' ) lowerCamelCase : Union[str, Any] = new_module lowerCamelCase : Dict = splits[-1] # offload weights lowerCamelCase : Optional[Any] = False offload_weight(module._parameters[tensor_name] , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase ) if hasattr(module._parameters[tensor_name] , """SCB""" ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace("""weight""" , """SCB""" ) , __UpperCamelCase , index=__UpperCamelCase , ) else: offload_weight(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , index=__UpperCamelCase ) offload_weight(__UpperCamelCase , param_name.replace("""weight""" , """SCB""" ) , __UpperCamelCase , index=__UpperCamelCase ) set_module_tensor_to_device(__UpperCamelCase , __UpperCamelCase , """meta""" , dtype=__UpperCamelCase , value=torch.empty(*param.size() ) )
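A dependency-free sketch of the recursive replacement loop above: walk named children, swap each nn.Linear not on the skip list for a quantized stand-in, and recurse. QuantLinear here is a placeholder for bnb.nn.Linear8bitLt / Linear4bit:

import torch.nn as nn

class QuantLinear(nn.Linear):  # placeholder for a bitsandbytes layer
    pass

def replace_linears(model: nn.Module, skip: set, prefix: str = "") -> nn.Module:
    for name, child in model.named_children():
        full_name = f"{prefix}.{name}" if prefix else name
        if isinstance(child, nn.Linear) and full_name not in skip:
            new = QuantLinear(child.in_features, child.out_features, child.bias is not None)
            new.load_state_dict(child.state_dict())  # carry the weights over
            setattr(model, name, new)
        else:
            replace_linears(child, skip, full_name)
    return model

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
replace_linears(model, skip={"2"})  # keep the head (child "2") unquantized
print(model)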
713
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]: lowerCamelCase : Optional[int] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : Tuple = num_channels lowerCamelCase : str = num_stages lowerCamelCase : List[str] = hidden_sizes lowerCamelCase : str = depths lowerCamelCase : Dict = is_training lowerCamelCase : Optional[Any] = use_labels lowerCamelCase : List[str] = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : List[str] = num_labels lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : List[Any] = out_features lowerCamelCase : Optional[Any] = out_indices lowerCamelCase : int = scope def a__ ( self: str )-> Optional[Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def a__ ( self: Dict )-> Union[str, Any]: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]: lowerCamelCase : Optional[int] = ConvNextModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]: lowerCamelCase : str = ConvNextForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]: lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() 
lowerCamelCase : int = model(__a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase : Tuple = None lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs lowerCamelCase : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : int =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) snake_case__ : str =( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) snake_case__ : Union[str, Any] =True snake_case__ : Optional[int] =False snake_case__ : Tuple =False snake_case__ : Union[str, Any] =False snake_case__ : Tuple =False def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = ConvNextModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def a__ ( self: Optional[int] )-> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: Optional[int] )-> Optional[Any]: return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def a__ ( self: int )-> Dict: pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def a__ ( self: Dict )-> Optional[Any]: pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def a__ ( self: int )-> List[Any]: pass def a__ ( self: Union[str, Any] )-> int: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__a ) lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: Optional[int] )-> str: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: str )-> int: lowerCamelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: int )-> Optional[int]: def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ): lowerCamelCase : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__a ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Tuple = True check_hidden_states_output(__a , __a , __a ) def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def a__ ( self: Optional[Any] )-> Tuple: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : str = ConvNextModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Optional[int]: lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Dict )-> Union[str, Any]: return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a ) lowerCamelCase : Dict = self.default_image_processor lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @require_torch class A__ ( unittest.TestCase , __lowercase): """simple docstring""" snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else () snake_case__ : Optional[Any] =ConvNextConfig snake_case__ : Optional[Any] =False def a__ ( self: List[str] )-> int: lowerCamelCase : Dict = ConvNextModelTester(self )
42
0
"""simple docstring""" import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __lowerCamelCase :Tuple = 16 __lowerCamelCase :Union[str, Any] = 32 def snake_case ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict = 16 , UpperCamelCase__ : str = "bert-base-cased" ) -> List[Any]: lowerCamelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase__ ) lowerCamelCase : Optional[int] = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(UpperCamelCase__ : str ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCamelCase : List[str] = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=UpperCamelCase__ ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCamelCase__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(UpperCamelCase__ , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. 
lowerCamelCase : Any = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) lowerCamelCase : int = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) return train_dataloader, eval_dataloader def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> Optional[Any]: # Initialize accelerator lowerCamelCase : List[Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase : Any = config["""lr"""] lowerCamelCase : Dict = int(config["""num_epochs"""] ) lowerCamelCase : List[Any] = int(config["""seed"""] ) lowerCamelCase : List[str] = int(config["""batch_size"""] ) lowerCamelCase : List[Any] = args.model_name_or_path set_seed(UpperCamelCase__ ) lowerCamelCase : List[str] = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase : Tuple = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase__ , return_dict=UpperCamelCase__ ) # Instantiate optimizer lowerCamelCase : Tuple = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCamelCase : List[str] = optimizer_cls(params=model.parameters() , lr=UpperCamelCase__ ) if accelerator.state.deepspeed_plugin is not None: lowerCamelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: lowerCamelCase : Any = 1 lowerCamelCase : str = (len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCamelCase : List[Any] = get_linear_schedule_with_warmup( optimizer=UpperCamelCase__ , num_warmup_steps=0 , num_training_steps=UpperCamelCase__ , ) else: lowerCamelCase : Dict = DummyScheduler(UpperCamelCase__ , total_num_steps=UpperCamelCase__ , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowerCamelCase : Dict = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # We need to keep track of how many total steps we have iterated over lowerCamelCase : str = 0 # We also need to keep track of the stating epoch so files are named properly lowerCamelCase : Tuple = 0 # Now we train the model lowerCamelCase : str = evaluate.load("""glue""" , """mrpc""" ) lowerCamelCase : int = 0 lowerCamelCase : Tuple = {} for epoch in range(UpperCamelCase__ , UpperCamelCase__ ): model.train() for step, batch in enumerate(UpperCamelCase__ ): lowerCamelCase : List[str] = model(**UpperCamelCase__ ) lowerCamelCase : int = outputs.loss lowerCamelCase : List[Any] = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() lowerCamelCase : List[Any] = 0 for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase : Optional[int] = model(**UpperCamelCase__ ) lowerCamelCase : Dict = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times lowerCamelCase : str = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(UpperCamelCase__ ) - 1: lowerCamelCase : int = predictions[: len(eval_dataloader.dataset ) - samples_seen] lowerCamelCase : Union[str, Any] = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=UpperCamelCase__ , references=UpperCamelCase__ , ) lowerCamelCase : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , UpperCamelCase__ ) lowerCamelCase : str = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: lowerCamelCase : Dict = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""" ) , """w""" ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def snake_case ( ) -> Optional[int]: lowerCamelCase : int = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=UpperCamelCase__ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=UpperCamelCase__ , ) parser.add_argument( """--output_dir""" , type=UpperCamelCase__ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=UpperCamelCase__ , default=3 , help="""Number of train epochs.""" , ) lowerCamelCase : Optional[int] = parser.parse_args() lowerCamelCase : Any = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
714
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Optional[int] = logging.get_logger(__name__) __lowerCamelCase :List[str] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class A__ ( __lowercase): """simple docstring""" snake_case__ : Optional[Any] ='''realm''' def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) # Common config lowerCamelCase : Optional[Any] = vocab_size lowerCamelCase : str = max_position_embeddings lowerCamelCase : Dict = hidden_size lowerCamelCase : Dict = retriever_proj_size lowerCamelCase : Optional[Any] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Tuple = num_candidates lowerCamelCase : int = intermediate_size lowerCamelCase : Dict = hidden_act lowerCamelCase : List[str] = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Optional[int] = initializer_range lowerCamelCase : Dict = type_vocab_size lowerCamelCase : Optional[Any] = layer_norm_eps # Reader config lowerCamelCase : List[str] = span_hidden_size lowerCamelCase : Dict = max_span_width lowerCamelCase : Optional[Any] = reader_layer_norm_eps lowerCamelCase : Optional[int] = reader_beam_size lowerCamelCase : List[Any] = reader_seq_len # Retrieval config lowerCamelCase : int = num_block_records lowerCamelCase : Dict = searcher_beam_size
42
0
"""simple docstring""" __lowerCamelCase :List[str] = tuple[float, float, float] __lowerCamelCase :Union[str, Any] = tuple[float, float, float] def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ) -> int: lowerCamelCase : Optional[Any] = end_pointa[0] - end_pointa[0] lowerCamelCase : Optional[int] = end_pointa[1] - end_pointa[1] lowerCamelCase : int = end_pointa[2] - end_pointa[2] return (x, y, z) def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] ) -> Optional[Any]: lowerCamelCase : Any = ab[1] * ac[2] - ab[2] * ac[1] # *i lowerCamelCase : List[Any] = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j lowerCamelCase : Tuple = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ) -> str: return tuple(round(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for x in vector ) == (0, 0, 0) def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] = 10 ) -> Tuple: lowerCamelCase : Tuple = create_vector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowerCamelCase : List[str] = create_vector(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return is_zero_vector(get_ad_vectors_cross(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
715
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Tuple = logging.get_logger(__name__) __lowerCamelCase :Any = { 'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json', # See all GLPN models at https://huggingface.co/models?filter=glpn } class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple ='''glpn''' def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict: super().__init__(**__a ) lowerCamelCase : Dict = num_channels lowerCamelCase : Any = num_encoder_blocks lowerCamelCase : Dict = depths lowerCamelCase : List[str] = sr_ratios lowerCamelCase : Dict = hidden_sizes lowerCamelCase : Tuple = patch_sizes lowerCamelCase : Optional[int] = strides lowerCamelCase : Optional[Any] = mlp_ratios lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : List[str] = hidden_act lowerCamelCase : Any = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : List[Any] = initializer_range lowerCamelCase : Dict = drop_path_rate lowerCamelCase : Any = layer_norm_eps lowerCamelCase : Optional[Any] = decoder_hidden_size lowerCamelCase : Tuple = max_depth lowerCamelCase : Optional[Any] = head_in_index
42
0
"""simple docstring""" from __future__ import annotations def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> bool: lowerCamelCase : Optional[int] = get_failure_array(_lowerCAmelCase ) # 2) Step through text searching for pattern lowerCamelCase : Optional[Any] = 0, 0 # index into text, pattern while i < len(_lowerCAmelCase ): if pattern[j] == text[i]: if j == (len(_lowerCAmelCase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: lowerCamelCase : str = failure[j - 1] continue i += 1 return False def snake_case ( UpperCamelCase__ : str ) -> list[int]: lowerCamelCase : List[Any] = [0] lowerCamelCase : List[Any] = 0 lowerCamelCase : Union[str, Any] = 1 while j < len(_lowerCAmelCase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: lowerCamelCase : Optional[int] = failure[i - 1] continue j += 1 failure.append(_lowerCAmelCase ) return failure if __name__ == "__main__": # Test 1) __lowerCamelCase :Optional[Any] = """abc1abc12""" __lowerCamelCase :int = """alskfjaldsabc1abc1abc12k23adsfabcabc""" __lowerCamelCase :Tuple = """alskfjaldsk23adsfabcabc""" assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) __lowerCamelCase :List[Any] = """ABABX""" __lowerCamelCase :Dict = """ABABZABABYABABX""" assert kmp(pattern, text) # Test 3) __lowerCamelCase :Optional[int] = """AAAB""" __lowerCamelCase :Any = """ABAAAAAB""" assert kmp(pattern, text) # Test 4) __lowerCamelCase :Optional[Any] = """abcdabcy""" __lowerCamelCase :Optional[Any] = """abcxabcdabxabcdabcdabcy""" assert kmp(pattern, text) # Test 5) __lowerCamelCase :str = """aabaabaaa""" assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
716
"""simple docstring""" from __future__ import annotations import math def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float: lowerCamelCase : Dict = u for i in range(1 , UpperCamelCase__ ): lowerCamelCase : List[str] = temp * (u - i) return temp def snake_case ( ) -> None: lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) ) lowerCamelCase : list[list[float]] = [] for _ in range(UpperCamelCase__ ): y.append([] ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): y[i].append(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = 0 print("""enter the values of parameters in a list: """ ) lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(UpperCamelCase__ ): lowerCamelCase : int = float(input() ) lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) ) lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , UpperCamelCase__ ): for j in range(n - i ): lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1] lowerCamelCase : Any = y[0][0] for i in range(1 , UpperCamelCase__ ): summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ ) print(F'the value at {value} is {summ}' ) if __name__ == "__main__": main()
42
0
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf __lowerCamelCase :Any = logging.get_logger(__name__) @dataclass class A__ ( UpperCamelCase__): """simple docstring""" snake_case__ : int =[ """no_inference""", """no_cuda""", """no_tpu""", """no_speed""", """no_memory""", """no_env_print""", """no_multi_process""", ] def __init__( self: Union[str, Any] , **__a: Optional[int] )-> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: lowerCamelCase : Any = deprecated_arg[3:] lowerCamelCase : Tuple = not kwargs.pop(__a ) logger.warning( f'{deprecated_arg} is depreciated. Please use --no-{positive_arg} or' f' {positive_arg}={kwargs[positive_arg]}' ) lowerCamelCase : Tuple = kwargs.pop("""tpu_name""" , self.tpu_name ) lowerCamelCase : Dict = kwargs.pop("""device_idx""" , self.device_idx ) lowerCamelCase : List[str] = kwargs.pop("""eager_mode""" , self.eager_mode ) lowerCamelCase : Optional[int] = kwargs.pop("""use_xla""" , self.use_xla ) super().__init__(**__a ) snake_case__ : str =field( default=UpperCamelCase__ , metadata={'''help''': '''Name of TPU'''} , ) snake_case__ : int =field( default=0 , metadata={'''help''': '''CPU / GPU device index. Defaults to 0.'''} , ) snake_case__ : bool =field(default=UpperCamelCase__ , metadata={'''help''': '''Benchmark models in eager model.'''}) snake_case__ : bool =field( default=UpperCamelCase__ , metadata={ '''help''': '''Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.''' } , ) @cached_property def a__ ( self: Optional[Any] )-> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) lowerCamelCase : Tuple = None if self.tpu: try: if self.tpu_name: lowerCamelCase : Any = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: lowerCamelCase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: lowerCamelCase : List[str] = None return tpu @cached_property def a__ ( self: Tuple )-> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ["""tf"""] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) lowerCamelCase : List[Any] = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , """GPU""" ) lowerCamelCase : str = tf.distribute.OneDeviceStrategy(device=f'/gpu:{self.device_idx}' ) else: tf.config.set_visible_devices([] , """GPU""" ) # disable GPU lowerCamelCase : Optional[int] = tf.distribute.OneDeviceStrategy(device=f'/cpu:{self.device_idx}' ) return strategy @property def a__ ( self: List[Any] )-> bool: requires_backends(self , ["""tf"""] ) return self._setup_tpu is not None @property def a__ ( self: Tuple )-> "tf.distribute.Strategy": requires_backends(self , ["""tf"""] ) return self._setup_strategy @property def a__ ( self: int )-> Union[str, Any]: requires_backends(self , ["""tf"""] ) return tf.config.list_physical_devices("""GPU""" ) @property def a__ ( self: Union[str, Any] )-> int: requires_backends(self , ["""tf"""] ) if self.cuda: return len(self.gpu_list ) return 0 
@property def a__ ( self: Tuple )-> bool: return self.n_gpu > 0
717
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase :str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys __lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" def snake_case ( UpperCamelCase__ : Dict ) -> Union[str, Any]: if not all(char in """01""" for char in bin_string ): raise ValueError("""Non-binary value was passed to the function""" ) if not bin_string: raise ValueError("""Empty string was passed to the function""" ) lowerCamelCase : List[Any] = "" while len(__lowerCAmelCase ) % 3 != 0: lowerCamelCase : Tuple = "0" + bin_string lowerCamelCase : Dict = [ bin_string[index : index + 3] for index in range(len(__lowerCAmelCase ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: lowerCamelCase : int = 0 for index, val in enumerate(__lowerCAmelCase ): oct_val += int(2 ** (2 - index) * int(__lowerCAmelCase ) ) oct_string += str(__lowerCAmelCase ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
718
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase :Dict = logging.get_logger() def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict: print(F'Converting {name}...' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ ) else: lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 192: lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 256: lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 384: lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ ) from_model.eval() lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval() lowerCamelCase : Tuple = OrderedDict() lowerCamelCase : Optional[Any] = from_model.state_dict() lowerCamelCase : str = list(from_model.state_dict().keys() ) lowerCamelCase : List[Any] = list(our_model.state_dict().keys() ) print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : str = weights[og_keys[i]] our_model.load_state_dict(UpperCamelCase__ ) lowerCamelCase : int = torch.randn((2, 3, 224, 224) ) lowerCamelCase : Any = from_model(UpperCamelCase__ ) lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one." 
lowerCamelCase : Dict = name print(UpperCamelCase__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowerCamelCase : Optional[int] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'Pushed {checkpoint_name}' ) def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]: lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : List[Any] = 1000 lowerCamelCase : Dict = (1, num_labels) lowerCamelCase : List[Any] = """huggingface/label-files""" lowerCamelCase : Optional[int] = num_labels lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase : List[Any] = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) lowerCamelCase : Optional[int] = { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } lowerCamelCase : List[Any] = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) __lowerCamelCase :List[Any] = parser.parse_args() __lowerCamelCase :Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
42
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __lowerCamelCase :int = { 'configuration_tapas': ['TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TapasConfig'], 'tokenization_tapas': ['TapasTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Any = [ 'TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TapasForMaskedLM', 'TapasForQuestionAnswering', 'TapasForSequenceClassification', 'TapasModel', 'TapasPreTrainedModel', 'load_tf_weights_in_tapas', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :List[Any] = [ 'TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFTapasForMaskedLM', 'TFTapasForQuestionAnswering', 'TFTapasForSequenceClassification', 'TFTapasModel', 'TFTapasPreTrainedModel', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
719
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple =(KDPMaDiscreteScheduler,) snake_case__ : Tuple =10 def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]: lowerCamelCase : int = { """num_train_timesteps""": 1_100, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__a ) return config def a__ ( self: Union[str, Any] )-> Any: for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__a ) def a__ ( self: str )-> int: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def a__ ( self: int )-> Union[str, Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a ) def a__ ( self: List[Any] )-> List[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def a__ ( self: Union[str, Any] )-> int: lowerCamelCase : List[str] = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase : List[str] = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : Dict = self.dummy_model() lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : List[Any] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : Optional[Any] = output.prev_sample lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2 assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2 assert abs(result_mean.item() - 0.00_02 ) < 1e-3 def a__ ( self: Any )-> Any: if torch_device == "mps": return lowerCamelCase : Dict = self.scheduler_classes[0] lowerCamelCase : Dict = self.get_scheduler_config() lowerCamelCase : int = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : List[Any] = self.dummy_model() lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : Optional[int] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[Any] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : str = output.prev_sample lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 def a__ ( self: Optional[Any] )-> List[Any]: if torch_device == "mps": return lowerCamelCase : Any = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config() lowerCamelCase : Optional[Any] = 
scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) lowerCamelCase : Union[str, Any] = self.dummy_model() lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : int = scheduler.step(__a , __a , __a ) lowerCamelCase : int = output.prev_sample lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) ) lowerCamelCase : int = torch.mean(torch.abs(__a ) ) if str(__a ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3
42
0
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def snake_case ( UpperCamelCase__ : float ) -> List[str]: if num <= 0: raise ValueError("""math domain error""" ) return quad(_UpperCamelCase , 0 , _UpperCamelCase , args=(_UpperCamelCase) )[0] def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : float ) -> str: return math.pow(_UpperCamelCase , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
720
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =StableDiffusionXLImgaImgPipeline snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS def a__ ( self: List[str] )-> int: torch.manual_seed(0 ) lowerCamelCase : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) lowerCamelCase : Any = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) lowerCamelCase : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) lowerCamelCase : Dict = CLIPTextModel(__a ) lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : Dict = CLIPTextModelWithProjection(__a ) lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]: lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) lowerCamelCase : Any = image / 2 + 0.5 if str(__a ).startswith("""mps""" ): lowerCamelCase : Dict = torch.manual_seed(__a ) else: lowerCamelCase : Tuple = torch.Generator(device=__a 
).manual_seed(__a ) lowerCamelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase : Union[str, Any] = self.get_dummy_components() lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : int = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a ) lowerCamelCase : Optional[int] = sd_pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a__ ( self: Optional[int] )-> Union[str, Any]: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def a__ ( self: Optional[Any] )-> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def a__ ( self: List[str] )-> Optional[Any]: pass def a__ ( self: List[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = self.get_dummy_components() lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : str = sd_pipe.to(__a ) lowerCamelCase : Any = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) # forward without prompt embeds lowerCamelCase : Dict = self.get_dummy_inputs(__a ) lowerCamelCase : Any = 3 * ["""this is a negative prompt"""] lowerCamelCase : Optional[int] = negative_prompt lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]] lowerCamelCase : List[Any] = sd_pipe(**__a ) lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCamelCase : Tuple = self.get_dummy_inputs(__a ) lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""] lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )] ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a ) lowerCamelCase : int = sd_pipe( **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , ) lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Dict )-> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]: lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) ) lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a ) lowerCamelCase : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self: Optional[int] )-> List[str]: lowerCamelCase : Tuple = 
DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[int] = self.get_inputs(__a ) lowerCamelCase : Optional[Any] = pipe(**__a ).images lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
42
0
"""simple docstring""" import argparse import torch from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Union[str, Any]: # Construct model if gpta_config_file == "": lowerCamelCase : Dict = GPTaConfig() else: lowerCamelCase : Optional[int] = GPTaConfig.from_json_file(UpperCamelCase__ ) lowerCamelCase : Optional[int] = GPTaModel(UpperCamelCase__ ) # Load weights from numpy load_tf_weights_in_gpta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save pytorch-model lowerCamelCase : Union[str, Any] = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME lowerCamelCase : Optional[int] = pytorch_dump_folder_path + """/""" + CONFIG_NAME print(F'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , UpperCamelCase__ ) print(F'Save configuration file to {pytorch_config_dump_path}' ) with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __lowerCamelCase :Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--gpt2_config_file', default='', type=str, help=( 'An optional config json file corresponding to the pre-trained OpenAI model. \n' 'This specifies the model architecture.' ), ) __lowerCamelCase :Tuple = parser.parse_args() convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
721
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]: return None class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple: return None class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =[ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def a__ ( self: Optional[Any] )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """tf""" , 12 , **__a ) @require_torch @slow def a__ ( self: str )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """pt""" , 12 , **__a ) @require_torch @slow def a__ ( self: Union[str, Any] )-> Dict: from transformers import BertModel lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__a ) ) vocab_file.flush() lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , """pt""" , 12 , __a ) @require_tf @slow def a__ ( self: Optional[Any] )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a ) lowerCamelCase : Tuple = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def a__ ( self: Any )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a ) lowerCamelCase : Dict = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any: try: # Compute path with TemporaryDirectory() as tempdir: lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def a__ ( self: Tuple )-> Dict: from transformers import BertModel lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a 
, """pt""" ) @require_tf @require_tokenizers @slow def a__ ( self: Optional[Any] )-> List[Any]: from transformers import TFBertModel lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a , """tf""" ) def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]: lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a ) lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} ) def a__ ( self: List[Any] )-> int: lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] , """input_ids""" ) def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
42
0
"""simple docstring""" from __future__ import annotations def snake_case ( UpperCamelCase__ : Union[str, Any] ) -> int: if not nums: return 0 lowerCamelCase : Union[str, Any] = nums[0] lowerCamelCase : Tuple = 0 for num in nums[1:]: lowerCamelCase , lowerCamelCase : str = ( max_excluding + num, max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), ) return max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
700
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Optional[int] )-> Union[str, Any]: lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60] lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12] lowerCamelCase : Union[str, Any] = 100 self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 ) def a__ ( self: str )-> str: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: str )-> List[Any]: self.assertRaisesRegex(__a , """Weight can not be negative.""" ) def a__ ( self: Any )-> Dict: self.assertRaisesRegex(__a , """Profit can not be negative.""" ) def a__ ( self: Optional[Any] )-> List[Any]: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: Optional[Any] )-> Tuple: self.assertRaisesRegex( __a , """The length of profit and weight must be same.""" ) if __name__ == "__main__": unittest.main()
42
0
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def snake_case ( UpperCamelCase__ : List[str] ) -> List[Any]: # A local function to see if a dot lands in the circle. def is_in_circle(UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> bool: lowerCamelCase : str = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle lowerCamelCase : Dict = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(lowercase__ ) ) # The ratio of the area for circle to square is pi/4. lowerCamelCase : Tuple = proportion * 4 print(F'The estimated value of pi is {pi_estimate}' ) print(F'The numpy value of pi is {pi}' ) print(F'The total error is {abs(pi - pi_estimate )}' ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] = 0.0 , UpperCamelCase__ : List[str] = 1.0 , ) -> Optional[int]: return mean( function_to_integrate(uniform(lowercase__ , lowercase__ ) ) for _ in range(lowercase__ ) ) * (max_value - min_value) def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] = 0.0 , UpperCamelCase__ : Any = 1.0 ) -> Dict: def identity_function(UpperCamelCase__ : List[str] ) -> float: return x lowerCamelCase : Union[str, Any] = area_under_curve_estimator( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) lowerCamelCase : Union[str, Any] = (max_value * max_value - min_value * min_value) / 2 print("""******************""" ) print(F'Estimating area under y=x where x varies from {min_value} to {max_value}' ) print(F'Estimated value is {estimated_value}' ) print(F'Expected value is {expected_value}' ) print(F'Total error is {abs(estimated_value - expected_value )}' ) print("""******************""" ) def snake_case ( UpperCamelCase__ : List[Any] ) -> Dict: def function_to_integrate(UpperCamelCase__ : Dict ) -> float: return sqrt(4.0 - x * x ) lowerCamelCase : Any = area_under_curve_estimator( lowercase__ , lowercase__ , 0.0 , 2.0 ) print("""******************""" ) print("""Estimating pi using area_under_curve_estimator""" ) print(F'Estimated value is {estimated_value}' ) print(F'Expected value is {pi}' ) print(F'Total error is {abs(estimated_value - pi )}' ) print("""******************""" ) if __name__ == "__main__": import doctest doctest.testmod()
701
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase :List[str] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor'] __lowerCamelCase :List[str] = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO ) __lowerCamelCase :List[str] = logging.getLogger(__name__) def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int ) -> List[str]: lowerCamelCase : Optional[int] = np.argmax(lowerCAmelCase_ , axis=1 ) return np.sum(outputs == labels ) def snake_case ( UpperCamelCase__ : Dict ) -> Any: with open(lowerCAmelCase_ , encoding="""utf_8""" ) as f: lowerCamelCase : Dict = csv.reader(lowerCAmelCase_ ) lowerCamelCase : Tuple = [] next(lowerCAmelCase_ ) # skip the first line for line in tqdm(lowerCAmelCase_ ): output.append((""" """.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> Optional[int]: lowerCamelCase : Union[str, Any] = [] for dataset in encoded_datasets: lowerCamelCase : List[Any] = len(lowerCAmelCase_ ) lowerCamelCase : int = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) lowerCamelCase : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa ) lowerCamelCase : Optional[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) lowerCamelCase : int = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(lowerCAmelCase_ ): lowerCamelCase : Any = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowerCamelCase : List[Any] = with_conta lowerCamelCase : Dict = with_conta lowerCamelCase : str = len(lowerCAmelCase_ ) - 1 lowerCamelCase : List[Any] = len(lowerCAmelCase_ ) - 1 lowerCamelCase : Optional[Any] = with_conta lowerCamelCase : List[Any] = with_conta lowerCamelCase : Optional[Any] = mc_label lowerCamelCase : Optional[Any] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(lowerCAmelCase_ ) for t in all_inputs ) ) return tensor_datasets def snake_case ( ) -> List[str]: lowerCamelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--model_name""" , type=lowerCAmelCase_ , default="""openai-gpt""" , help="""pretrained model name""" ) parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""" ) parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""" ) parser.add_argument( """--output_dir""" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="""The output directory where the model predictions and checkpoints will be written.""" , ) parser.add_argument("""--train_dataset""" , type=lowerCAmelCase_ , default="""""" ) parser.add_argument("""--eval_dataset""" , type=lowerCAmelCase_ , default="""""" ) parser.add_argument("""--seed""" , type=lowerCAmelCase_ , default=42 ) parser.add_argument("""--num_train_epochs""" , type=lowerCAmelCase_ , default=3 ) 
parser.add_argument("""--train_batch_size""" , type=lowerCAmelCase_ , default=8 ) parser.add_argument("""--eval_batch_size""" , type=lowerCAmelCase_ , default=16 ) parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=lowerCAmelCase_ , help="""Epsilon for Adam optimizer.""" ) parser.add_argument("""--max_grad_norm""" , type=lowerCAmelCase_ , default=1 ) parser.add_argument( """--max_steps""" , default=-1 , type=lowerCAmelCase_ , help=( """If > 0: set total number of training steps to perform. Override num_train_epochs.""" ) , ) parser.add_argument( """--gradient_accumulation_steps""" , type=lowerCAmelCase_ , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , ) parser.add_argument("""--learning_rate""" , type=lowerCAmelCase_ , default=6.25E-5 ) parser.add_argument("""--warmup_steps""" , default=0 , type=lowerCAmelCase_ , help="""Linear warmup over warmup_steps.""" ) parser.add_argument("""--lr_schedule""" , type=lowerCAmelCase_ , default="""warmup_linear""" ) parser.add_argument("""--weight_decay""" , type=lowerCAmelCase_ , default=0.0_1 ) parser.add_argument("""--lm_coef""" , type=lowerCAmelCase_ , default=0.9 ) parser.add_argument("""--n_valid""" , type=lowerCAmelCase_ , default=374 ) parser.add_argument("""--server_ip""" , type=lowerCAmelCase_ , default="""""" , help="""Can be used for distant debugging.""" ) parser.add_argument("""--server_port""" , type=lowerCAmelCase_ , default="""""" , help="""Can be used for distant debugging.""" ) lowerCamelCase : Optional[int] = parser.parse_args() print(lowerCAmelCase_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("""Waiting for debugger attach""" ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowerCamelCase : Optional[Any] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) lowerCamelCase : Dict = torch.cuda.device_count() logger.info("""device: {}, n_gpu {}""".format(lowerCAmelCase_ , lowerCAmelCase_ ) ) if not args.do_train and not args.do_eval: raise ValueError("""At least one of `do_train` or `do_eval` must be True.""" ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowerCamelCase : Optional[Any] = ['''_start_''', '''_delimiter_''', '''_classify_'''] lowerCamelCase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(lowerCAmelCase_ ) lowerCamelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) lowerCamelCase : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(lowerCAmelCase_ ) ) model.to(lowerCAmelCase_ ) # Load and encode the datasets def tokenize_and_encode(UpperCamelCase__ : Dict ): if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCAmelCase_ ) ) elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): return obj return [tokenize_and_encode(lowerCAmelCase_ ) for o in obj] logger.info("""Encoding dataset...""" ) lowerCamelCase : Optional[Any] = 
load_rocstories_dataset(args.train_dataset ) lowerCamelCase : List[Any] = load_rocstories_dataset(args.eval_dataset ) lowerCamelCase : Dict = (train_dataset, eval_dataset) lowerCamelCase : Optional[Any] = tokenize_and_encode(lowerCAmelCase_ ) # Compute the max input length for the Transformer lowerCamelCase : Tuple = model.config.n_positions // 2 - 2 lowerCamelCase : Optional[Any] = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowerCamelCase : Optional[int] = min(lowerCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowerCamelCase : int = pre_process_datasets(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ ) lowerCamelCase : Dict = tensor_datasets[0], tensor_datasets[1] lowerCamelCase : str = TensorDataset(*lowerCAmelCase_ ) lowerCamelCase : List[Any] = RandomSampler(lowerCAmelCase_ ) lowerCamelCase : List[Any] = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.train_batch_size ) lowerCamelCase : str = TensorDataset(*lowerCAmelCase_ ) lowerCamelCase : Any = SequentialSampler(lowerCAmelCase_ ) lowerCamelCase : str = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowerCamelCase : Optional[int] = args.max_steps lowerCamelCase : Optional[int] = args.max_steps // (len(lowerCAmelCase_ ) // args.gradient_accumulation_steps) + 1 else: lowerCamelCase : Dict = len(lowerCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs lowerCamelCase : Union[str, Any] = list(model.named_parameters() ) lowerCamelCase : str = ['''bias''', '''LayerNorm.bias''', '''LayerNorm.weight'''] lowerCamelCase : Union[str, Any] = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] lowerCamelCase : Any = AdamW(lowerCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon ) lowerCamelCase : str = get_linear_schedule_with_warmup( lowerCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCAmelCase_ ) if args.do_train: lowerCamelCase : Optional[int] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc="""Epoch""" ): lowerCamelCase : str = 0 lowerCamelCase : List[str] = 0 lowerCamelCase : Tuple = tqdm(lowerCAmelCase_ , desc="""Training""" ) for step, batch in enumerate(lowerCAmelCase_ ): lowerCamelCase : Any = tuple(t.to(lowerCAmelCase_ ) for t in batch ) lowerCamelCase : Optional[Any] = batch lowerCamelCase : Union[str, Any] = model(lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ ) lowerCamelCase : List[Any] = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowerCamelCase : Optional[Any] = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowerCamelCase : int = '''Training loss: {:.2e} lr: {:.2e}'''.format(lowerCAmelCase_ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowerCamelCase : Optional[int] = model.module if hasattr(lowerCAmelCase_ , """module""" ) else model # 
Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowerCamelCase : Dict = os.path.join(args.output_dir , lowerCAmelCase_ ) lowerCamelCase : Dict = os.path.join(args.output_dir , lowerCAmelCase_ ) torch.save(model_to_save.state_dict() , lowerCAmelCase_ ) model_to_save.config.to_json_file(lowerCAmelCase_ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowerCamelCase : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) lowerCamelCase : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(lowerCAmelCase_ ) if args.do_eval: model.eval() lowerCamelCase : Tuple = 0, 0 lowerCamelCase : str = 0, 0 for batch in tqdm(lowerCAmelCase_ , desc="""Evaluating""" ): lowerCamelCase : Any = tuple(t.to(lowerCAmelCase_ ) for t in batch ) lowerCamelCase : Dict = batch with torch.no_grad(): lowerCamelCase : List[Any] = model( lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ ) lowerCamelCase : int = mc_logits.detach().cpu().numpy() lowerCamelCase : Any = mc_labels.to("""cpu""" ).numpy() lowerCamelCase : Optional[Any] = accuracy(lowerCAmelCase_ , lowerCAmelCase_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowerCamelCase : Optional[int] = eval_loss / nb_eval_steps lowerCamelCase : Optional[int] = eval_accuracy / nb_eval_examples lowerCamelCase : List[Any] = tr_loss / nb_tr_steps if args.do_train else None lowerCamelCase : Tuple = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} lowerCamelCase : str = os.path.join(args.output_dir , """eval_results.txt""" ) with open(lowerCAmelCase_ , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key in sorted(result.keys() ): logger.info(""" %s = %s""" , lowerCAmelCase_ , str(result[key] ) ) writer.write("""%s = %s\n""" % (key, str(result[key] )) ) if __name__ == "__main__": main()
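# A minimal, self-contained sketch of the optimizer/scheduler wiring the
# fine-tuning script above relies on, using the public transformers API;
# the tiny linear model and step counts are placeholders, not the script's
# real OpenAIGPTDoubleHeadsModel or hyperparameters.
import torch
from transformers import AdamW, get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)
optimizer = AdamW(model.parameters(), lr=6.25e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=100
)

for _ in range(3):  # one optimizer + scheduler step per batch, as in the script
    loss = model(torch.randn(4, 10)).sum()
    loss.backward()
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()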
702
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict: lowerCamelCase : Dict = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = image_size lowerCamelCase : Optional[int] = patch_size lowerCamelCase : Any = num_channels lowerCamelCase : Any = embed_dim lowerCamelCase : Dict = hidden_sizes lowerCamelCase : List[Any] = depths lowerCamelCase : Tuple = num_heads lowerCamelCase : List[Any] = window_size lowerCamelCase : str = mlp_ratio lowerCamelCase : str = qkv_bias lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Tuple = drop_path_rate lowerCamelCase : Dict = hidden_act lowerCamelCase : Tuple = use_absolute_embeddings lowerCamelCase : List[str] = patch_norm lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : str = initializer_range lowerCamelCase : Tuple = is_training lowerCamelCase : int = scope lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : List[str] = type_sequence_label_size lowerCamelCase : str = encoder_stride lowerCamelCase : List[str] = out_features lowerCamelCase : Optional[int] = out_indices def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : str = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = self.get_config() return config, pixel_values, labels def a__ ( self: List[Any] )-> Optional[int]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Tuple = model(__a ) lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int: lowerCamelCase : List[Any] = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase : Dict = None lowerCamelCase : Dict = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a ) model.to(__a ) model.eval() lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str: lowerCamelCase : Optional[Any] = self.type_sequence_label_size lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : int = 1 lowerCamelCase : List[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self: int )-> Optional[int]: 
lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) snake_case__ : Optional[int] =( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) snake_case__ : Tuple =False snake_case__ : Dict =False snake_case__ : Dict =False snake_case__ : Tuple =False snake_case__ : Optional[int] =False def a__ ( self: Union[str, Any] )-> Optional[int]: lowerCamelCase : List[str] = FocalNetModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a ) def a__ ( self: List[str] )-> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: List[str] )-> Union[str, Any]: return def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[Any] )-> Dict: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: List[Any] )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def a__ ( self: Optional[Any] )-> str: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def a__ ( self: Optional[Any] )-> Dict: pass def a__ ( self: Optional[Any] )-> Dict: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : Any = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : int = model_class(__a ) lowerCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Any = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]: lowerCamelCase : List[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase 
: List[str] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : List[str] = outputs.hidden_states lowerCamelCase : Tuple = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # FocalNet has a different seq_length lowerCamelCase : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape lowerCamelCase : Tuple = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self: Any )-> Any: lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase : List[str] = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : List[Any] = True self.check_hidden_states_output(__a , __a , __a , __a ) def a__ ( self: str )-> Union[str, Any]: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = 3 lowerCamelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase : str = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Union[str, Any] = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) @slow def a__ ( self: Optional[int] )-> List[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> Any: lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = _config_zero_init(__a ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems 
not properly initialized' , ) @require_vision @require_torch class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Optional[int] )-> Optional[Any]: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else () snake_case__ : Optional[int] =FocalNetConfig snake_case__ : str =False def a__ ( self: Union[str, Any] )-> Tuple: lowerCamelCase : str = FocalNetModelTester(self )
42
0
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch __lowerCamelCase :Any = logging.get_logger(__name__) class A__ : """simple docstring""" def __init__( self: Optional[int] , __a: Optional[int] = None , __a: Union[str, Any] = None , __a: Optional[int]=None , __a: Tuple=None )-> Tuple: if not conversation_id: lowerCamelCase : int = uuid.uuida() if past_user_inputs is None: lowerCamelCase : int = [] if generated_responses is None: lowerCamelCase : List[Any] = [] lowerCamelCase : uuid.UUID = conversation_id lowerCamelCase : List[str] = past_user_inputs lowerCamelCase : List[str] = generated_responses lowerCamelCase : Optional[str] = text def __eq__( self: str , __a: List[str] )-> Union[str, Any]: if not isinstance(lowercase__ , lowercase__ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a__ ( self: Optional[Any] , __a: List[str] , __a: str = False )-> Any: if self.new_user_input: if overwrite: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' f'with: "{text}".' ) lowerCamelCase : Any = text else: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input' ) else: lowerCamelCase : List[Any] = text def a__ ( self: List[Any] )-> Any: if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCamelCase : Dict = None def a__ ( self: List[Any] , __a: Optional[int] )-> List[str]: self.generated_responses.append(lowercase__ ) def a__ ( self: List[Any] )-> Any: for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self: str )-> Dict: lowerCamelCase : List[str] = f'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): lowerCamelCase : Optional[Any] = """user""" if is_user else """bot""" output += f'{name} >> {text} \n' return output @add_end_docstrings( _UpperCAmelCase , R''' min_length_for_response (`int`, *optional*, defaults to 32): The minimum length (in number of tokens) for a response. minimum_tokens (`int`, *optional*, defaults to 10): The minimum length of tokens to leave for a response. 
''' , ) class A__ ( _UpperCAmelCase): """simple docstring""" def __init__( self: Optional[Any] , *__a: Tuple , **__a: int )-> Dict: super().__init__(*lowercase__ , **lowercase__ ) if self.tokenizer.pad_token_id is None: lowerCamelCase : Union[str, Any] = self.tokenizer.eos_token def a__ ( self: Optional[Any] , __a: Optional[Any]=None , __a: List[str]=None , __a: str=None , **__a: Any )-> int: lowerCamelCase : Tuple = {} lowerCamelCase : Optional[Any] = {} lowerCamelCase : Optional[Any] = {} if min_length_for_response is not None: lowerCamelCase : Dict = min_length_for_response if minimum_tokens is not None: lowerCamelCase : Union[str, Any] = minimum_tokens if "max_length" in generate_kwargs: lowerCamelCase : List[str] = generate_kwargs["""max_length"""] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCamelCase : Union[str, Any] = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(lowercase__ ) return preprocess_params, forward_params, postprocess_params def __call__( self: Union[str, Any] , __a: Any , __a: Any=0 , **__a: Dict )-> List[Any]: lowerCamelCase : List[str] = super().__call__(lowercase__ , num_workers=lowercase__ , **lowercase__ ) if isinstance(lowercase__ , lowercase__ ) and len(lowercase__ ) == 1: return outputs[0] return outputs def a__ ( self: Optional[int] , __a: Union[str, Any] , __a: str=32 )-> str: if not isinstance(lowercase__ , lowercase__ ): raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" ) if conversation.new_user_input is None: raise ValueError( f'Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ' """Add user inputs with the conversation's `add_user_input` method""" ) if hasattr(self.tokenizer , """_build_conversation_input_ids""" ): lowerCamelCase : str = self.tokenizer._build_conversation_input_ids(lowercase__ ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCamelCase : Any = self._legacy_parse_and_tokenize(lowercase__ ) if self.framework == "pt": lowerCamelCase : Optional[Any] = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCamelCase : Dict = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def a__ ( self: Tuple , __a: List[str] , __a: str=10 , **__a: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Optional[int] = generate_kwargs.get("""max_length""" , self.model.config.max_length ) lowerCamelCase : str = model_inputs["""input_ids"""].shape[1] if max_length - minimum_tokens < n: logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})' ) lowerCamelCase : int = max_length - minimum_tokens lowerCamelCase : List[Any] = model_inputs["""input_ids"""][:, -trim:] if "attention_mask" in model_inputs: lowerCamelCase : Union[str, Any] = model_inputs["""attention_mask"""][:, -trim:] lowerCamelCase : int = model_inputs.pop("""conversation""" ) lowerCamelCase : Dict = max_length lowerCamelCase : Tuple = self.model.generate(**lowercase__ , **lowercase__ ) if self.model.config.is_encoder_decoder: lowerCamelCase : str = 1 else: lowerCamelCase : List[Any] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a__ ( self: Optional[int] , __a: str , __a: Any=True )-> Any: lowerCamelCase : int = model_outputs["""output_ids"""] lowerCamelCase : List[Any] = self.tokenizer.decode( output_ids[0] , 
skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ , ) lowerCamelCase : Optional[Any] = model_outputs["""conversation"""] conversation.mark_processed() conversation.append_response(lowercase__ ) return conversation def a__ ( self: Dict , __a: Dict )-> Union[str, Any]: lowerCamelCase : Dict = self.tokenizer.eos_token_id lowerCamelCase : Optional[int] = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ ) ) if len(lowercase__ ) > self.tokenizer.model_max_length: lowerCamelCase : int = input_ids[-self.tokenizer.model_max_length :] return input_ids
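# A hedged usage sketch for the conversational pipeline defined above; the
# checkpoint name is illustrative and any conversational model would do.
from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
conversation = Conversation("What is the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])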
703
"""simple docstring""" import os def snake_case ( ) -> Optional[Any]: with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f: lowerCamelCase : int = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase__ ) for x in f.readline().split()] ) lowerCamelCase : Union[str, Any] = 0 # right for i in range(20 ): for j in range(17 ): lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowerCamelCase : Tuple = temp # down for i in range(17 ): for j in range(20 ): lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowerCamelCase : Optional[Any] = temp # diagonal 1 for i in range(17 ): for j in range(17 ): lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowerCamelCase : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowerCamelCase : List[Any] = temp return maximum if __name__ == "__main__": print(solution())
42
0
"""simple docstring""" import numpy as np from PIL import Image def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]: lowerCamelCase : Optional[int] = np.array(a_ ) if arr.shape[0] != arr.shape[1]: raise ValueError("""The input array is not a square matrix""" ) lowerCamelCase : Dict = 0 lowerCamelCase : int = 0 lowerCamelCase : List[Any] = 0 lowerCamelCase : Dict = 0 # compute the shape of the output matrix lowerCamelCase : Optional[Any] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape lowerCamelCase : Union[str, Any] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix lowerCamelCase : Union[str, Any] = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCamelCase : Dict = 0 lowerCamelCase : int = 0 return updated_arr def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Tuple: lowerCamelCase : Optional[int] = np.array(a_ ) if arr.shape[0] != arr.shape[1]: raise ValueError("""The input array is not a square matrix""" ) lowerCamelCase : Any = 0 lowerCamelCase : Any = 0 lowerCamelCase : Any = 0 lowerCamelCase : List[Any] = 0 # compute the shape of the output matrix lowerCamelCase : List[Any] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape lowerCamelCase : Union[str, Any] = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix lowerCamelCase : Optional[int] = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 lowerCamelCase : str = 0 lowerCamelCase : List[str] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name='avgpooling', verbose=True) # Loading the image __lowerCamelCase :List[Any] = Image.open('path_to_image') # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
704
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Any = False @skip_mps class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline snake_case__ : Any =False snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Dict )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Union[str, Any] )-> Any: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: Tuple )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : Optional[int] = CLIPTextModel(__a ) lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]: if str(__a ).startswith("""mps""" ): lowerCamelCase : Tuple = torch.manual_seed(__a ) else: lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : Dict = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: Dict )-> str: lowerCamelCase : Tuple = """cpu""" lowerCamelCase : List[str] = self.get_dummy_components() lowerCamelCase : List[Any] = self.pipeline_class(**__a 
) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Any = self.get_dummy_inputs(__a ) lowerCamelCase : Union[str, Any] = pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def a__ ( self: int )-> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Union[str, Any] )-> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> int: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: Dict )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: Optional[int] )-> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Tuple: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: str )-> str: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: Any )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Dict )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: int )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = torch.manual_seed(51 ) lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : Dict = """a painting of an elephant with glasses""" lowerCamelCase : Any = [5, 7] lowerCamelCase : Tuple = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
42
0
"""simple docstring""" import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def snake_case ( UpperCamelCase__ : List[str] ) -> str: lowerCamelCase : List[Any] = MobileNetVaConfig(layer_norm_eps=0.0_0_1 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) lowerCamelCase : Any = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""" , UpperCamelCase__ ) if matches: lowerCamelCase : int = float(matches[1] ) lowerCamelCase : Union[str, Any] = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". lowerCamelCase : Dict = 1001 lowerCamelCase : Dict = """imagenet-1k-id2label.json""" lowerCamelCase : int = """huggingface/label-files""" lowerCamelCase : Union[str, Any] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase : Optional[Any] = {int(UpperCamelCase__ ) + 1: v for k, v in idalabel.items()} lowerCamelCase : Union[str, Any] = """background""" lowerCamelCase : List[Any] = idalabel lowerCamelCase : Any = {v: k for k, v in idalabel.items()} return config def snake_case ( ) -> str: lowerCamelCase : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : List[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=False ) -> Union[str, Any]: lowerCamelCase : int = get_mobilenet_va_config(UpperCamelCase__ ) # Load 🤗 model lowerCamelCase : int = MobileNetVaForImageClassification(UpperCamelCase__ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor lowerCamelCase : Dict = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 32} , ) lowerCamelCase : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCamelCase : Dict = model(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": lowerCamelCase : Dict = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ) elif model_name == "mobilenet_v1_0.75_192": lowerCamelCase : Dict = torch.tensor([-3.9_4_4_0, -2.3_1_4_1, -0.3_3_3_3] ) else: lowerCamelCase : Dict = None if expected_logits is not None: assert torch.allclose(logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: print("""Pushing to the hub...""" ) lowerCamelCase : Optional[Any] = """google/""" + model_name image_processor.push_to_hub(UpperCamelCase__ ) 
model.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase :str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCamelCase :Optional[int] = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
705
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A__ : """simple docstring""" def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : Any = seq_length lowerCamelCase : Any = is_training lowerCamelCase : Tuple = use_input_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : List[str] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Union[str, Any] = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : Any = max_position_embeddings lowerCamelCase : str = type_vocab_size lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : Union[str, Any] = num_labels lowerCamelCase : Optional[Any] = num_choices lowerCamelCase : Any = scope def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Dict = None if self.use_input_mask: lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Any = None lowerCamelCase : int = None lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self: Tuple )-> Union[str, Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , 
__a: str )-> int: lowerCamelCase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a ) lowerCamelCase : str = model(__a ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int: lowerCamelCase : str = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]: lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Dict = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Tuple = config_and_inputs lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Any =False snake_case__ : Dict =( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Dict =() snake_case__ : Optional[int] =( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Any =True def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Optional[Any] = EsmModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 ) def a__ ( self: List[Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: Tuple )-> Any: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase : Tuple = type self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def a__ ( self: Any )-> List[Any]: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> List[str]: lowerCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a ) lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase : Union[str, Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Any = EsmEmbeddings(config=__a ) lowerCamelCase : Dict = torch.empty(2 , 4 , 30 ) lowerCamelCase : List[Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Any )-> Optional[Any]: pass @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Dict )-> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: List[str] )-> Dict: pass @require_torch class A__ ( __lowercase): """simple docstring""" @slow def a__ ( self: Any )-> Union[str, Any]: with torch.no_grad(): lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Tuple = model(__a )[0] lowerCamelCase : Dict = 33 lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) lowerCamelCase : Tuple = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Dict )-> str: with torch.no_grad(): lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Any = model(__a )[0] # compare the actual values for a slice. lowerCamelCase : Tuple = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
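# The two embedding tests above pin down ESM's position-id rule: non-padding
# tokens receive cumulative positions offset by padding_idx + 1, while padding
# tokens keep padding_idx. A minimal standalone sketch of that rule (the
# function name and the cumsum formulation are illustrative assumptions, not
# taken from the library):
import torch

def position_ids_sketch(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).long()  # 1 for real tokens, 0 for padding
    return torch.cumsum(mask, dim=1) * mask + padding_idx

# Same values as the first embedding test; the config above uses pad_token_id=1.
print(position_ids_sketch(torch.tensor([[12, 31, 13, 1]]), padding_idx=1))
# tensor([[2, 3, 4, 1]])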
42
0
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCamelCase :Optional[Any] = logging.get_logger(__name__) __lowerCamelCase :Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED __lowerCamelCase :Union[str, Any] = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } __lowerCamelCase :Union[str, Any] = { 'allenai/led-base-16384': 16_384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def snake_case ( ) -> Union[str, Any]: lowerCamelCase : Union[str, Any] = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) lowerCamelCase : List[str] = bs[:] lowerCamelCase : str = 0 for b in range(2**8 ): if b not in bs: bs.append(lowercase_ ) cs.append(2**8 + n ) n += 1 lowerCamelCase : str = [chr(lowercase_ ) for n in cs] return dict(zip(lowercase_ , lowercase_ ) ) def snake_case ( UpperCamelCase__ : int ) -> str: lowerCamelCase : Dict = set() lowerCamelCase : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCamelCase : Dict = char return pairs class A__ ( _UpperCamelCase): """simple docstring""" snake_case__ : Optional[int] =VOCAB_FILES_NAMES snake_case__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP snake_case__ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Optional[int] =["input_ids", "attention_mask"] def __init__( self: Optional[Any] , __a: int , __a: Optional[int] , __a: Optional[int]="replace" , __a: Any="<s>" , __a: Dict="</s>" , __a: List[str]="</s>" , __a: int="<s>" , __a: Any="<unk>" , __a: str="<pad>" , __a: Any="<mask>" , __a: List[Any]=False , **__a: List[str] , )-> str: lowerCamelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else bos_token lowerCamelCase : List[str] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else eos_token lowerCamelCase : Tuple = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else sep_token lowerCamelCase : str = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else cls_token lowerCamelCase : Any = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else unk_token lowerCamelCase : List[Any] = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowerCamelCase : int = AddedToken(__a , lstrip=__a , rstrip=__a ) if isinstance(__a , __a ) else mask_token super().__init__( errors=__a , bos_token=__a , eos_token=__a , unk_token=__a , sep_token=__a , cls_token=__a , pad_token=__a , mask_token=__a , add_prefix_space=__a , **__a , ) with open(__a , encoding="""utf-8""" ) as vocab_handle: lowerCamelCase : int = json.load(__a ) lowerCamelCase : str = {v: k for k, v in self.encoder.items()} lowerCamelCase : int = errors # how to handle errors in decoding lowerCamelCase : str = bytes_to_unicode() lowerCamelCase : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__a , encoding="""utf-8""" ) as merges_handle: lowerCamelCase : List[Any] = merges_handle.read().split("""\n""" )[1:-1] lowerCamelCase : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges] lowerCamelCase : List[Any] = dict(zip(__a , range(len(__a ) ) ) ) lowerCamelCase : Any = {} lowerCamelCase : List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCamelCase : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def a__ ( self: Dict )-> Optional[Any]: return len(self.encoder ) def a__ ( self: List[str] )-> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def a__ ( self: List[Any] , __a: Optional[Any] )-> Tuple: if token in self.cache: return self.cache[token] lowerCamelCase : Optional[int] = tuple(__a ) lowerCamelCase : List[Any] = get_pairs(__a ) if not pairs: return token while True: lowerCamelCase : List[str] = min(__a , key=lambda __a : self.bpe_ranks.get(__a , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCamelCase : Union[str, Any] = bigram lowerCamelCase : Dict = [] lowerCamelCase : Optional[Any] = 0 while i < len(__a ): try: lowerCamelCase : Dict = word.index(__a , __a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCamelCase : str = j if word[i] == first and i < len(__a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCamelCase : str = tuple(__a ) lowerCamelCase : Optional[int] = new_word if len(__a ) == 1: break else: lowerCamelCase : Any = get_pairs(__a ) lowerCamelCase : str = " ".join(__a ) lowerCamelCase : Dict = word return word def a__ ( self: List[str] , __a: List[str] )-> Tuple: lowerCamelCase : str = [] for token in re.findall(self.pat , __a ): lowerCamelCase : List[str] = "".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__a ).split(""" """ ) ) return bpe_tokens def a__ ( self: List[str] , __a: int )-> Tuple: return self.encoder.get(__a , self.encoder.get(self.unk_token ) ) def a__ ( self: List[Any] , __a: Dict )-> Union[str, Any]: return self.decoder.get(__a ) def a__ ( self: Dict , __a: Tuple )-> str: lowerCamelCase : Tuple = "".join(__a ) lowerCamelCase : List[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def a__ ( self: Any , __a: str , __a: Optional[str] = None )-> Tuple[str]: if not os.path.isdir(__a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase 
: Tuple = os.path.join( __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCamelCase : Dict = os.path.join( __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(__a , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__a , ensure_ascii=__a ) + """\n""" ) lowerCamelCase : str = 0 with open(__a , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __a : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) lowerCamelCase : Union[str, Any] = token_index writer.write(""" """.join(__a ) + """\n""" ) index += 1 return vocab_file, merge_file def a__ ( self: int , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase : Tuple = [self.cls_token_id] lowerCamelCase : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self: List[Any] , __a: List[int] , __a: Optional[List[int]] = None , __a: bool = False )-> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a ) if token_ids_a is None: return [1] + ([0] * len(__a )) + [1] return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1] def a__ ( self: str , __a: List[int] , __a: Optional[List[int]] = None )-> List[int]: lowerCamelCase : Optional[int] = [self.sep_token_id] lowerCamelCase : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self: Any , __a: str , __a: int=False , **__a: List[Any] )-> List[Any]: lowerCamelCase : Optional[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()): lowerCamelCase : List[Any] = " " + text return (text, kwargs) def a__ ( self: Dict , __a: Union[Dict[str, EncodedInput], BatchEncoding] , __a: Optional[int] = None , __a: PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __a: Optional[int] = None , __a: Optional[bool] = None , )-> dict: lowerCamelCase : Any = super()._pad( encoded_inputs=__a , max_length=__a , padding_strategy=__a , pad_to_multiple_of=__a , return_attention_mask=__a , ) # Load from model defaults if return_attention_mask is None: lowerCamelCase : str = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: lowerCamelCase : Dict = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
lowerCamelCase : str = len(encoded_inputs["""global_attention_mask"""] ) != len(__a ) if needs_to_be_padded: lowerCamelCase : List[str] = len(__a ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` lowerCamelCase : str = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": lowerCamelCase : List[Any] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
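# The `_pad` override at the end of the tokenizer above grows
# `global_attention_mask` with -1 entries (meaning local attention) until it
# matches the padded input length. A standalone sketch with illustrative names:
def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    difference = target_len - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + str(padding_side))

print(pad_global_attention_mask([1, 0, 0], 5))  # [1, 0, 0, -1, -1]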
706
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :str = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =AlbertTokenizer snake_case__ : Optional[Any] =AlbertTokenizerFast snake_case__ : Optional[int] =True snake_case__ : Any =True snake_case__ : Optional[int] =True def a__ ( self: Dict )-> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : int = AlbertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]: lowerCamelCase : List[str] = """this is a test""" lowerCamelCase : int = """this is a test""" return input_text, output_text def a__ ( self: Any )-> List[Any]: lowerCamelCase : int = """<pad>""" lowerCamelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def a__ ( self: Tuple )-> str: lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(__a ) , 30_000 ) def a__ ( self: List[str] )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def a__ ( self: Optional[Any] )-> Union[str, Any]: if not self.test_rust_tokenizer: return lowerCamelCase : str = self.get_tokenizer() lowerCamelCase : Tuple = self.get_rust_tokenizer() lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : List[str] = tokenizer.tokenize(__a ) lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Any = self.get_rust_tokenizer() lowerCamelCase : List[str] = tokenizer.encode(__a ) lowerCamelCase : str = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def a__ ( self: Tuple )-> List[Any]: lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a ) lowerCamelCase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] ) lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def a__ ( self: Tuple )-> str: lowerCamelCase : str = AlbertTokenizer(__a ) lowerCamelCase : Union[str, Any] = 
tokenizer.encode("""sequence builders""" ) lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" ) lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def a__ ( self: Any )-> Dict: # fmt: off lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
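# The sequence-builder test above asserts ALBERT's special-token layout:
# [CLS] A [SEP] for a single sequence and [CLS] A [SEP] B [SEP] for a pair.
# A tiny sketch of that layout with placeholder token ids (2 = CLS, 3 = SEP
# are assumed values for illustration):
def build_inputs(cls_id, sep_id, ids_a, ids_b=None):
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]

print(build_inputs(2, 3, [7, 8]))       # [2, 7, 8, 3]
print(build_inputs(2, 3, [7, 8], [9]))  # [2, 7, 8, 3, 9, 3]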
42
0
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: float, idf: float) -> float:
    return round(tf * idf, 3)
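# A quick end-to-end check of the functions above; the idf value follows from
# round(log10(3 / 2), 3) = 0.176 for a term appearing in 2 of 3 documents.
corpus = "the first document\nthe second document\nthe third one"
tf = term_frequency("document", "the first document")  # 1
df, n = document_frequency("document", corpus)         # (2, 3)
idf = inverse_document_frequency(df, n)                # 0.176
print(tf_idf(tf, idf))                                 # 0.176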
707
"""simple docstring""" __lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : Tuple = True lowerCamelCase : Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) order.append(UpperCamelCase__ ) return order def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : List[Any] = True lowerCamelCase : int = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return component def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]: lowerCamelCase : int = len(UpperCamelCase__ ) * [False] lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(UpperCamelCase__ ) lowerCamelCase : int = [] for i, was_visited in enumerate(UpperCamelCase__ ): if not was_visited: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Tuple = [] lowerCamelCase : str = len(UpperCamelCase__ ) * [False] for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1] if not visited[vert]: lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) components_list.append(UpperCamelCase__ ) return components_list
42
0
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __lowerCamelCase :int = 16 __lowerCamelCase :int = 32 def lowerCAmelCase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] = 16 ) -> Tuple: lowerCamelCase : Tuple = AutoTokenizer.from_pretrained("""bert-base-cased""" ) lowerCamelCase : Any = DatasetDict( { """train""": dataset["""train"""].select(UpperCamelCase__ ), """validation""": dataset["""train"""].select(UpperCamelCase__ ), """test""": dataset["""validation"""], } ) def tokenize_function(UpperCamelCase__ : Tuple ): # max_length=None => use the model max length (it's actually the default) lowerCamelCase : Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowerCamelCase : str = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCamelCase : int = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(UpperCamelCase__ : List[str] ): # On TPU it's best to pad everything to the same length or training will be very slow. lowerCamelCase : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowerCamelCase : Any = 16 elif accelerator.mixed_precision != "no": lowerCamelCase : str = 8 else: lowerCamelCase : Tuple = None return tokenizer.pad( UpperCamelCase__ , padding="""longest""" , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors="""pt""" , ) # Instantiate dataloaders. 
lowerCamelCase : Optional[int] = DataLoader( tokenized_datasets["""train"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) lowerCamelCase : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) lowerCamelCase : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) return train_dataloader, eval_dataloader, test_dataloader def lowerCAmelCase ( UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Optional[Any]: lowerCamelCase : List[str] = [] # Download the dataset lowerCamelCase : str = load_dataset("""glue""" , """mrpc""" ) # Create our splits lowerCamelCase : List[str] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator lowerCamelCase : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCamelCase : Tuple = config["""lr"""] lowerCamelCase : Tuple = int(config["""num_epochs"""] ) lowerCamelCase : List[Any] = int(config["""seed"""] ) lowerCamelCase : List[Any] = int(config["""batch_size"""] ) lowerCamelCase : str = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation lowerCamelCase : List[Any] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowerCamelCase : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE lowerCamelCase : Tuple = MAX_GPU_BATCH_SIZE set_seed(UpperCamelCase__ ) # New Code # # Create our folds: lowerCamelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) lowerCamelCase : Tuple = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(UpperCamelCase__ ): lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = get_fold_dataloaders( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCamelCase : int = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=UpperCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowerCamelCase : Optional[int] = model.to(accelerator.device ) # Instantiate optimizer lowerCamelCase : Tuple = AdamW(params=model.parameters() , lr=UpperCamelCase__ ) # Instantiate scheduler lowerCamelCase : Any = get_linear_schedule_with_warmup( optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Now we train the model for epoch in range(UpperCamelCase__ ): model.train() for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowerCamelCase : int = model(**UpperCamelCase__ ) lowerCamelCase : Any = outputs.loss lowerCamelCase : Tuple = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase : int = model(**UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = outputs.logits.argmax(dim=-1 ) lowerCamelCase , lowerCamelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=UpperCamelCase__ , references=UpperCamelCase__ , ) lowerCamelCase : Union[str, Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , UpperCamelCase__ ) # New Code # # We also run predictions on the test set at the very end lowerCamelCase : int = [] for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): lowerCamelCase : List[Any] = model(**UpperCamelCase__ ) lowerCamelCase : Dict = outputs.logits lowerCamelCase , lowerCamelCase : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(UpperCamelCase__ , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: lowerCamelCase : int = torch.cat(UpperCamelCase__ , dim=0 ) lowerCamelCase : Optional[Any] = torch.stack(UpperCamelCase__ , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) lowerCamelCase : int = metric.compute(predictions=UpperCamelCase__ , references=UpperCamelCase__ ) accelerator.print("""Average test metrics from all folds:""" , UpperCamelCase__ ) def lowerCAmelCase ( ) -> Any: lowerCamelCase : List[Any] = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=UpperCamelCase__ , default=3 , help="""The number of splits to perform across the dataset""" ) lowerCamelCase : List[Any] = parser.parse_args() lowerCamelCase : Tuple = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
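# At the end of the training loop above, each fold's test logits are stacked,
# averaged over folds, and argmaxed to produce the ensembled prediction. A
# shape-level sketch of that step with illustrative sizes (3 folds, 8 test
# examples, 2 classes):
import torch

num_folds = 3
test_predictions = [torch.randn(8, 2) for _ in range(num_folds)]  # per-fold logits
preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(num_folds).argmax(dim=-1)
print(preds.shape)  # torch.Size([8])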
708
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :str = logging.get_logger(__name__) __lowerCamelCase :Any = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( __lowercase): """simple docstring""" snake_case__ : List[Any] ='''time_series_transformer''' snake_case__ : List[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any: # time series specific configuration lowerCamelCase : str = prediction_length lowerCamelCase : Optional[Any] = context_length or prediction_length lowerCamelCase : Tuple = distribution_output lowerCamelCase : Any = loss lowerCamelCase : List[Any] = input_size lowerCamelCase : int = num_time_features lowerCamelCase : Dict = lags_sequence lowerCamelCase : Optional[int] = scaling lowerCamelCase : int = num_dynamic_real_features lowerCamelCase : Tuple = num_static_real_features lowerCamelCase : Any = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : int = cardinality else: lowerCamelCase : Dict = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : str = embedding_dimension else: lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase : Any = num_parallel_samples # Transformer architecture configuration lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features lowerCamelCase : List[str] = d_model lowerCamelCase : Tuple = encoder_attention_heads lowerCamelCase : Optional[int] = decoder_attention_heads lowerCamelCase : Union[str, Any] = encoder_ffn_dim lowerCamelCase : str = decoder_ffn_dim lowerCamelCase : str = encoder_layers lowerCamelCase : Any = decoder_layers lowerCamelCase : Optional[int] = dropout lowerCamelCase : List[str] = attention_dropout lowerCamelCase : Tuple = activation_dropout lowerCamelCase : Optional[int] = encoder_layerdrop lowerCamelCase : int = decoder_layerdrop lowerCamelCase : Optional[int] = activation_function lowerCamelCase : Optional[Any] = init_std lowerCamelCase : Optional[Any] = use_cache super().__init__(is_encoder_decoder=__a , **__a ) @property def a__ ( self: int )-> int: return 
( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
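# Worked instance of the feature-size arithmetic above, using illustrative
# values rather than the class defaults: _number_of_features adds two scaling
# features (log1p(abs(loc)) and log(scale)) per input dimension, and the
# d_model input is input_size * len(lags_sequence) plus that feature count.
input_size = 1
lags_sequence = [1, 2, 3, 4, 5, 6, 7]
embedding_dimension = [2]      # one static categorical feature
num_dynamic_real_features = 0
num_time_features = 2
num_static_real_features = 0

number_of_features = (
    sum(embedding_dimension)
    + num_dynamic_real_features
    + num_time_features
    + num_static_real_features
    + input_size * 2           # log1p(abs(loc)) and log(scale)
)
print(input_size * len(lags_sequence) + number_of_features)  # 13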
42
0
"""simple docstring""" from scipy.stats import spearmanr import datasets __lowerCamelCase :Any = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n' __lowerCamelCase :Dict = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n' __lowerCamelCase :Any = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class A__ ( datasets.Metric): """simple docstring""" def a__ ( self: List[Any] )-> str: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def a__ ( self: Dict , __a: Any , __a: Tuple , __a: Union[str, Any]=False )-> Dict: lowerCamelCase : int = spearmanr(_a , _a ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
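# The metric above is a thin wrapper around scipy.stats.spearmanr; the first
# docstring example can be checked directly against scipy:
from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 1))     # -0.7
print(round(pvalue, 2))  # 0.19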
709
"""simple docstring""" from __future__ import annotations __lowerCamelCase :int = 10 def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]: lowerCamelCase : int = 1 lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ ) while placement <= max_digit: # declare and initialize empty buckets lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCamelCase : Any = int((i / placement) % RADIX ) buckets[tmp].append(UpperCamelCase__ ) # put each buckets' contents into list_of_ints lowerCamelCase : Dict = 0 for b in range(UpperCamelCase__ ): for i in buckets[b]: lowerCamelCase : List[str] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
42
0
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels __lowerCamelCase :Union[str, Any] = object() # For specifying empty leaf dict `{}` __lowerCamelCase :Optional[Any] = object() def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int ) -> List[str]: lowerCamelCase : Optional[int] = tuple((re.compile(x + """$""" ) for x in qs) ) for i in range(len(__UpperCamelCase ) - len(__UpperCamelCase ) + 1 ): lowerCamelCase : Tuple = [x.match(__UpperCamelCase ) for x, y in zip(__UpperCamelCase , ks[i:] )] if matches and all(__UpperCamelCase ): return True return False def snake_case ( UpperCamelCase__ : Optional[int] ) -> Optional[int]: def replace(UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ): for rule, replacement in rules: if _match(__UpperCamelCase , __UpperCamelCase ): return replacement return val return replace def snake_case ( ) -> Dict: return [ # embeddings (("transformer", "wpe", "embedding"), P("""mp""" , __UpperCamelCase )), (("transformer", "wte", "embedding"), P("""mp""" , __UpperCamelCase )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(__UpperCamelCase , """mp""" )), (("attention", "out_proj", "kernel"), P("""mp""" , __UpperCamelCase )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(__UpperCamelCase , """mp""" )), (("mlp", "c_fc", "bias"), P("""mp""" )), (("mlp", "c_proj", "kernel"), P("""mp""" , __UpperCamelCase )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def snake_case ( UpperCamelCase__ : List[str] ) -> Any: lowerCamelCase : Optional[Any] = _get_partition_rules() lowerCamelCase : Dict = _replacement_rules(__UpperCamelCase ) lowerCamelCase : Optional[int] = {k: _unmatched for k in flatten_dict(__UpperCamelCase )} lowerCamelCase : Tuple = {k: replace(__UpperCamelCase , __UpperCamelCase ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(__UpperCamelCase ) )
710
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Dict = np.asarray(weights[0] ) lowerCamelCase : List[Any] = np.asarray(weights[1] ) lowerCamelCase : List[str] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Tuple = np.asarray(weights[0] ) lowerCamelCase : Any = np.asarray(weights[1] ) lowerCamelCase : List[Any] = np.asarray(weights[2] ) lowerCamelCase : List[str] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]: # layernorm 1 lowerCamelCase : str = weights[0][0][0] lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] ) lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # lsh weights + output lowerCamelCase : List[Any] = weights[0][1] if len(UpperCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) else: set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) # intermediate weighs lowerCamelCase : int = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCamelCase__ ) == 4: lowerCamelCase : Dict = intermediate_weights[2] # layernorm 2 lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] ) lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # intermediate dense lowerCamelCase : 
Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) # intermediate out lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] ) lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]: # reformer model lowerCamelCase : List[Any] = torch_model.reformer # word embeds lowerCamelCase : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , ) if isinstance(weights[3] , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) ) lowerCamelCase : int = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # output layer norm lowerCamelCase : Any = np.asarray(weights[7][0] ) lowerCamelCase : List[str] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # output embeddings lowerCamelCase : List[Any] = np.asarray(weights[9][0] ) lowerCamelCase : Optional[int] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: # Initialise PyTorch model lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ ) with open(UpperCamelCase__ , """rb""" ) as f: lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""] set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCamelCase :Optional[int] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
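# Every weight copy in the conversion script above flows through the
# shape-checked set_param helper; a minimal standalone sketch of that pattern
# (the Linear layer and zero weights here are illustrative):
import numpy as np
import torch
from torch import nn

def set_param_sketch(torch_layer, weight):
    # assert shapes agree, then wrap the array as a torch Parameter
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(torch.tensor(weight))

layer = nn.Linear(3, 4)
set_param_sketch(layer, np.zeros((4, 3), dtype=np.float32))
print(layer.weight.abs().sum().item())  # 0.0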
42
0
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class A__ ( unittest.TestCase): """simple docstring""" def __init__( self: Dict , __a: Optional[Any] , __a: Any=7 , __a: List[Any]=3 , __a: str=18 , __a: List[str]=30 , __a: Tuple=400 , __a: str=True , __a: Any=None , __a: Optional[int]=True , )-> Dict: lowerCamelCase : Union[str, Any] = size if size is not None else {"""height""": 18, """width""": 18} lowerCamelCase : List[Any] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Union[str, Any] = num_channels lowerCamelCase : List[Any] = image_size lowerCamelCase : Optional[Any] = min_resolution lowerCamelCase : str = max_resolution lowerCamelCase : int = do_resize lowerCamelCase : Union[str, Any] = size lowerCamelCase : Any = do_normalize def a__ ( self: Dict )-> Dict: return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04], [-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class A__ ( lowerCAmelCase__ , unittest.TestCase): """simple docstring""" snake_case__ : Any =ImageGPTImageProcessor if is_vision_available() else None def a__ ( self: List[str] )-> str: lowerCamelCase : Union[str, Any] = ImageGPTImageProcessingTester(self ) @property def a__ ( self: Optional[int] )-> List[str]: return self.image_processor_tester.prepare_image_processor_dict() def a__ ( self: Optional[int] )-> int: lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_lowerCamelCase , """clusters""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """size""" ) ) self.assertTrue(hasattr(_lowerCamelCase , """do_normalize""" ) ) def a__ ( self: Optional[Any] )-> Optional[int]: lowerCamelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} ) lowerCamelCase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} ) def a__ ( self: List[Any] )-> int: lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict ) lowerCamelCase : Tuple = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(_lowerCamelCase , obj[key] ) ) else: self.assertEqual(obj[key] , _lowerCamelCase ) def a__ ( self: Dict )-> List[str]: lowerCamelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCamelCase : List[str] = os.path.join(_lowerCamelCase , """image_processor.json""" ) image_processor_first.to_json_file(_lowerCamelCase ) lowerCamelCase : Tuple = 
self.image_processing_class.from_json_file(_lowerCamelCase ).to_dict() lowerCamelCase : str = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_lowerCamelCase , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _lowerCamelCase ) def a__ ( self: Tuple )-> Optional[Any]: lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(_lowerCamelCase ) lowerCamelCase : List[Any] = self.image_processing_class.from_pretrained(_lowerCamelCase ).to_dict() lowerCamelCase : Union[str, Any] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(_lowerCamelCase , image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] , _lowerCamelCase ) @unittest.skip("""ImageGPT requires clusters at initialization""" ) def a__ ( self: Optional[int] )-> List[str]: pass def snake_case ( ) -> Optional[int]: lowerCamelCase : Optional[int] = load_dataset("""hf-internal-testing/fixtures_image_utils""" , split="""test""" ) lowerCamelCase : Any = Image.open(dataset[4]["""file"""] ) lowerCamelCase : Optional[int] = Image.open(dataset[5]["""file"""] ) lowerCamelCase : List[str] = [imagea, imagea] return images @require_vision @require_torch class A__ ( unittest.TestCase): """simple docstring""" @slow def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : Optional[Any] = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""" ) lowerCamelCase : Tuple = prepare_images() # test non-batched lowerCamelCase : Any = image_processing(images[0] , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (1, 1_024) ) lowerCamelCase : Any = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() , _lowerCamelCase ) # test batched lowerCamelCase : Union[str, Any] = image_processing(_lowerCamelCase , return_tensors="""pt""" ) self.assertIsInstance(encoding.input_ids , torch.LongTensor ) self.assertEqual(encoding.input_ids.shape , (2, 1_024) ) lowerCamelCase : Tuple = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() , _lowerCamelCase )
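The tests above revolve around the clusters palette: ImageGPT quantizes every normalized pixel to the id of its nearest color cluster, turning an image into a token sequence. A minimal sketch of that nearest-neighbour step, reusing the two toy clusters from the fixture above (real checkpoints ship 512 clusters):

import numpy as np

# Toy palette mirroring the two clusters from prepare_image_processor_dict above.
clusters = np.asarray(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.022950088860528469, 0.5423797369003296],
    ]
)

def color_quantize(pixels: np.ndarray, palette: np.ndarray) -> np.ndarray:
    """Map each RGB pixel (values in [-1, 1]) to the index of its nearest cluster."""
    flat = pixels.reshape(-1, 3)
    # squared Euclidean distance from every pixel to every palette entry
    d = ((flat[:, None, :] - palette[None, :, :]) ** 2).sum(-1)
    return d.argmin(axis=1)

rng = np.random.default_rng(0)
image = rng.uniform(-1, 1, size=(18, 18, 3))  # matches the 18x18 test resolution
ids = color_quantize(image, clusters)
print(ids.shape)  # (324,) -> one token id per pixel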
711
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A__ ( nn.Module): """simple docstring""" def __init__( self: Dict )-> Dict: super().__init__() lowerCamelCase : Tuple = nn.Linear(3 , 4 ) lowerCamelCase : Optional[Any] = nn.BatchNormad(4 ) lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 ) def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple: return (args[0] + 1,) + args[1:], kwargs class A__ ( __lowercase): """simple docstring""" def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]: return output + 1 class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Dict = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Union[str, Any] = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : str = ModelForTest() lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Union[str, Any] = test_model(x + 1 ) lowerCamelCase : Optional[int] = test_model(x + 2 ) lowerCamelCase : List[Any] = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[int] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) assert torch.allclose(__a , __a , atol=1e-5 ) def a__ ( self: Any )-> Optional[int]: lowerCamelCase : str = ModelForTest() lowerCamelCase : List[str] = torch.randn(2 , 3 ) lowerCamelCase : int = test_model(__a ) lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple 
= test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : str = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1e-5 ) def a__ ( self: int )-> Dict: lowerCamelCase : List[Any] = ModelForTest() lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : List[str] = test_model(__a ) lowerCamelCase : Any = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a__ ( self: List[str] )-> Union[str, Any]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowerCamelCase : str = torch.randn(2 , 3 ) lowerCamelCase : Dict = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 ) lowerCamelCase : str = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Optional[Any] = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload lowerCamelCase : Any = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : int = torch.randn(2 , 3 ) lowerCamelCase : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Any )-> List[str]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Optional[Any] )-> List[Any]: lowerCamelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Tuple = torch.randn(2 , 3 ) lowerCamelCase : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
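To see the hook machinery exercised by these tests in isolation, here is a minimal, self-contained sketch using the same accelerate API; the two +1 hooks mirror PreForwardHook and PostForwardHook above.

import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, SequentialHook, add_hook_to_module, remove_hook_from_module

class AddOneToInput(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs

class AddOneToOutput(ModelHook):
    def post_forward(self, module, output):
        return output + 1

layer = nn.Identity()
add_hook_to_module(layer, SequentialHook(AddOneToInput(), AddOneToOutput()))
x = torch.zeros(2, 3)
print(layer(x))  # zeros + 1 (input hook) + 1 (output hook) == all twos
remove_hook_from_module(layer)
print(layer(x))  # back to zeros once the hook is removed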
42
0
"""simple docstring""" import numpy # List of input, output pairs __lowerCamelCase :int = ( ((5, 2, 3), 15), ((6, 5, 9), 25), ((11, 12, 13), 41), ((1, 1, 1), 8), ((11, 12, 13), 41), ) __lowerCamelCase :int = (((515, 22, 13), 555), ((61, 35, 49), 150)) __lowerCamelCase :List[Any] = [2, 4, 1, 5] __lowerCamelCase :Optional[Any] = len(train_data) __lowerCamelCase :Tuple = 0.009 def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : int="train" ) -> Any: return calculate_hypothesis_value(__snake_case , __snake_case ) - output( __snake_case , __snake_case ) def snake_case ( UpperCamelCase__ : int ) -> Tuple: lowerCamelCase : Union[str, Any] = 0 for i in range(len(__snake_case ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> str: if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : List[str] ) -> Union[str, Any]: if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=m ) -> Tuple: lowerCamelCase : Union[str, Any] = 0 for i in range(__snake_case ): if index == -1: summation_value += _error(__snake_case ) else: summation_value += _error(__snake_case ) * train_data[i][0][index] return summation_value def snake_case ( UpperCamelCase__ : List[str] ) -> Any: lowerCamelCase : str = summation_of_cost_derivative(__snake_case , __snake_case ) / m return cost_derivative_value def snake_case ( ) -> Union[str, Any]: global parameter_vector # Tune these values to set a tolerance value for predicted output lowerCamelCase : Dict = 0.0_0_0_0_0_2 lowerCamelCase : List[str] = 0 lowerCamelCase : Tuple = 0 while True: j += 1 lowerCamelCase : str = [0, 0, 0, 0] for i in range(0 , len(__snake_case ) ): lowerCamelCase : Any = get_cost_derivative(i - 1 ) lowerCamelCase : str = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( __snake_case , __snake_case , atol=__snake_case , rtol=__snake_case , ): break lowerCamelCase : List[str] = temp_parameter_vector print(("""Number of iterations:""", j) ) def snake_case ( ) -> Union[str, Any]: for i in range(len(__snake_case ) ): print(("""Actual output value:""", output(__snake_case , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(__snake_case , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print('\nTesting gradient descent for a linear hypothesis function.\n') test_gradient_descent()
712
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCamelCase :Optional[Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Union[str, Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" from typing import List import jiwer import jiwer.transforms as tr from packaging import version import datasets from datasets.config import PY_VERSION if PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata __lowerCamelCase :Tuple = '' if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'): class A__ ( tr.AbstractTransform): """simple docstring""" def __init__( self: str , __a: Dict = " " )-> str: lowerCamelCase : str = sentence_delimiter def a__ ( self: Optional[int] , __a: str )-> Optional[Any]: return list(__A ) def a__ ( self: List[Any] , __a: Tuple )-> Any: lowerCamelCase : Any = [] for sent_idx, sentence in enumerate(__A ): chars.extend(self.process_string(__A ) ) if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__A ) - 1: chars.append(self.sentence_delimiter ) return chars __lowerCamelCase :List[str] = tr.Compose( [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)] ) else: __lowerCamelCase :Union[str, Any] = tr.Compose( [ tr.RemoveMultipleSpaces(), tr.Strip(), tr.ReduceToSingleSentence(SENTENCE_DELIMITER), tr.ReduceToListOfListOfChars(), ] ) __lowerCamelCase :int = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n' __lowerCamelCase :Union[str, Any] = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. 
The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n' __lowerCamelCase :Optional[Any] = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class A__ ( datasets.Metric): """simple docstring""" def a__ ( self: Tuple )-> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[ """https://en.wikipedia.org/wiki/Word_error_rate""", """https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""", ] , ) def a__ ( self: Optional[Any] , __a: Tuple , __a: List[str] , __a: List[str]=False )-> str: if concatenate_texts: return jiwer.compute_measures( __A , __A , truth_transform=__A , hypothesis_transform=__A , )["wer"] lowerCamelCase : Any = 0 lowerCamelCase : Dict = 0 for prediction, reference in zip(__A , __A ): lowerCamelCase : str = jiwer.compute_measures( __A , __A , truth_transform=__A , hypothesis_transform=__A , ) incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"] total += measures["substitutions"] + measures["deletions"] + measures["hits"] return incorrect / total
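For intuition, the (S + D + I) / N formula in the description string can be computed directly with a character-level edit-distance table, no jiwer required. A minimal sketch, independent of the metric class above:

def cer(reference: str, prediction: str) -> float:
    """Character error rate via the standard Levenshtein DP over characters."""
    r, p = list(reference), list(prediction)
    # dp[i][j] = minimal number of edits turning r[:i] into p[:j]
    dp = [[0] * (len(p) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        dp[i][0] = i
    for j in range(len(p) + 1):
        dp[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(p) + 1):
            cost = 0 if r[i - 1] == p[j - 1] else 1   # substitution
            dp[i][j] = min(
                dp[i - 1][j] + 1,                     # deletion
                dp[i][j - 1] + 1,                     # insertion
                dp[i - 1][j - 1] + cost,
            )
    return dp[-1][-1] / len(r)

print(cer("this is the reference", "this is the prediction"))  # error rate for this single pair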
713
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]: lowerCamelCase : Optional[int] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : Tuple = num_channels lowerCamelCase : str = num_stages lowerCamelCase : List[str] = hidden_sizes lowerCamelCase : str = depths lowerCamelCase : Dict = is_training lowerCamelCase : Optional[Any] = use_labels lowerCamelCase : List[str] = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : List[str] = num_labels lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : List[Any] = out_features lowerCamelCase : Optional[Any] = out_indices lowerCamelCase : int = scope def a__ ( self: str )-> Optional[Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def a__ ( self: Dict )-> Union[str, Any]: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]: lowerCamelCase : Optional[int] = ConvNextModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]: lowerCamelCase : str = ConvNextForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]: lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() 
lowerCamelCase : int = model(__a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase : Tuple = None lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs lowerCamelCase : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : int =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) snake_case__ : str =( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) snake_case__ : Union[str, Any] =True snake_case__ : Optional[int] =False snake_case__ : Tuple =False snake_case__ : Union[str, Any] =False snake_case__ : Tuple =False def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = ConvNextModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def a__ ( self: Optional[int] )-> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: Optional[int] )-> Optional[Any]: return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def a__ ( self: int )-> Dict: pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def a__ ( self: Dict )-> Optional[Any]: pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def a__ ( self: int )-> List[Any]: pass def a__ ( self: Union[str, Any] )-> int: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__a ) lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: Optional[int] )-> str: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: str )-> int: lowerCamelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: int )-> Optional[int]: def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ): lowerCamelCase : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__a ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Tuple = True check_hidden_states_output(__a , __a , __a ) def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def a__ ( self: Optional[Any] )-> Tuple: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : str = ConvNextModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Optional[int]: lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Dict )-> Union[str, Any]: return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a ) lowerCamelCase : Dict = self.default_image_processor lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @require_torch class A__ ( unittest.TestCase , __lowercase): """simple docstring""" snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else () snake_case__ : Optional[Any] =ConvNextConfig snake_case__ : Optional[Any] =False def a__ ( self: List[str] )-> int: lowerCamelCase : Dict = ConvNextModelTester(self )
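The slow integration test above boils down to a few lines of plain inference. A sketch of that path, assuming the facebook/convnext-tiny-224 checkpoint is downloadable:

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")
model.eval()

image = Image.open("tests/fixtures/tests_samples/COCO/000000039769.png")  # same file as prepare_img()
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 1000), one score per ImageNet class
print(model.config.id2label[logits.argmax(-1).item()])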
42
0
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A__ : """simple docstring""" def __init__( self: str , __a: List[Any] , __a: Any=14 , __a: Optional[Any]=7 , __a: Optional[int]=True , __a: List[Any]=True , __a: int=False , __a: str=True , __a: Optional[int]=99 , __a: Union[str, Any]=32 , __a: Union[str, Any]=4 , __a: List[Any]=4 , __a: int=4 , __a: List[str]=37 , __a: int="gelu" , __a: List[Any]=0.1 , __a: List[str]=0.1 , __a: Optional[Any]=512 , __a: int=0.02 , )-> List[Any]: lowerCamelCase : List[Any] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : int = seq_length lowerCamelCase : int = is_training lowerCamelCase : int = use_input_mask lowerCamelCase : Union[str, Any] = use_token_type_ids lowerCamelCase : Dict = use_labels lowerCamelCase : Any = vocab_size lowerCamelCase : int = hidden_size lowerCamelCase : Dict = rotary_dim lowerCamelCase : Optional[Any] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : List[Any] = intermediate_size lowerCamelCase : int = hidden_act lowerCamelCase : List[Any] = hidden_dropout_prob lowerCamelCase : Tuple = attention_probs_dropout_prob lowerCamelCase : str = max_position_embeddings lowerCamelCase : int = initializer_range lowerCamelCase : Tuple = None lowerCamelCase : List[Any] = vocab_size - 1 lowerCamelCase : Dict = vocab_size - 1 lowerCamelCase : Tuple = vocab_size - 1 def a__ ( self: Optional[Any] )-> Optional[int]: lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : str = None if self.use_input_mask: lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : List[Any] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=__A , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = config_and_inputs lowerCamelCase : Tuple = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict def a__ ( self: int , __a: int , __a: Any , __a: Any , __a: Optional[Any] )-> Dict: lowerCamelCase : str = 20 lowerCamelCase : str = model_class_name(__A ) lowerCamelCase : Optional[int] = model.init_cache(input_ids.shape[0] , __A ) lowerCamelCase : Dict = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) lowerCamelCase : Any = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase : 
Tuple = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) lowerCamelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCamelCase : Any = model( input_ids[:, -1:] , attention_mask=__A , past_key_values=outputs_cache.past_key_values , position_ids=__A , ) lowerCamelCase : Any = model(__A ) lowerCamelCase : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) def a__ ( self: Dict , __a: List[Any] , __a: List[str] , __a: Optional[Any] , __a: Union[str, Any] )-> Optional[int]: lowerCamelCase : str = 20 lowerCamelCase : int = model_class_name(__A ) lowerCamelCase : str = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) lowerCamelCase : Tuple = model.init_cache(input_ids.shape[0] , __A ) lowerCamelCase : Any = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) lowerCamelCase : List[str] = model( input_ids[:, :-1] , attention_mask=__A , past_key_values=__A , position_ids=__A , ) lowerCamelCase : List[str] = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="""i4""" ) lowerCamelCase : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=__A , position_ids=__A , ) lowerCamelCase : Tuple = model(__A , attention_mask=__A ) lowerCamelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' ) @require_flax class A__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] =(FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () snake_case__ : Optional[Any] =(FlaxGPTJForCausalLM,) if is_flax_available() else () def a__ ( self: Tuple )-> Union[str, Any]: lowerCamelCase : Optional[int] = FlaxGPTJModelTester(self ) def a__ ( self: Union[str, Any] )-> int: for model_class_name in self.all_model_classes: lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(__A , __A , __A , __A ) def a__ ( self: Dict )-> Dict: for model_class_name in self.all_model_classes: lowerCamelCase , lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( __A , __A , __A , __A ) @tooslow def a__ ( self: List[str] )-> int: lowerCamelCase : Any = GPTaTokenizer.from_pretrained("""gpt2""" , pad_token="""<|endoftext|>""" , padding_side="""left""" ) lowerCamelCase : Union[str, Any] = tokenizer(["""Hello this is a long string""", """Hey"""] , return_tensors="""np""" , padding=__A , truncation=__A ) lowerCamelCase : int = FlaxGPTJForCausalLM.from_pretrained("""EleutherAI/gpt-j-6B""" ) lowerCamelCase : int = False lowerCamelCase : Optional[Any] = model.config.eos_token_id lowerCamelCase : Union[str, Any] = jax.jit(model.generate ) lowerCamelCase : str = jit_generate( inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , pad_token_id=tokenizer.pad_token_id ).sequences lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A , skip_special_tokens=__A ) lowerCamelCase : List[str] = [ """Hello this is a long string of text.\n\nI'm trying to get the text of the""", """Hey, I'm 
a little late to the party. I'm going to""", ] self.assertListEqual(__A , __A ) @is_pt_flax_cross_test def a__ ( self: Any )-> str: lowerCamelCase , lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase : Any = self._prepare_for_class(__A , __A ) lowerCamelCase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase : int = getattr(__A , __A ) lowerCamelCase , lowerCamelCase : Optional[int] = pt_inputs["""input_ids"""].shape lowerCamelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): lowerCamelCase : Dict = 0 lowerCamelCase : List[str] = 1 lowerCamelCase : Any = 0 lowerCamelCase : List[str] = 1 lowerCamelCase : Optional[Any] = pt_model_class(__A ).eval() lowerCamelCase : Optional[Any] = model_class(__A , dtype=jnp.floataa ) lowerCamelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __A ) lowerCamelCase : Any = fx_state with torch.no_grad(): lowerCamelCase : int = pt_model(**__A ).to_tuple() lowerCamelCase : Any = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(__A ) lowerCamelCase : Any = model_class.from_pretrained(__A , from_pt=__A ) lowerCamelCase : int = fx_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output_loaded, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @is_pt_flax_cross_test def a__ ( self: Any )-> Dict: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs lowerCamelCase : List[Any] = self._prepare_for_class(__A , __A ) lowerCamelCase : List[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class lowerCamelCase : List[Any] = model_class.__name__[4:] # Skip the "Flax" at the beginning lowerCamelCase : Union[str, Any] = getattr(__A , __A ) lowerCamelCase : List[str] = pt_model_class(__A ).eval() lowerCamelCase : Tuple = model_class(__A , dtype=jnp.floataa ) lowerCamelCase : Optional[Any] = load_flax_weights_in_pytorch_model(__A , fx_model.params ) lowerCamelCase , lowerCamelCase : str = pt_inputs["""input_ids"""].shape lowerCamelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(__A ): lowerCamelCase : Union[str, Any] = 0 lowerCamelCase : int = 1 lowerCamelCase : Dict = 0 lowerCamelCase : str = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): lowerCamelCase : Union[str, Any] = pt_model(**__A ).to_tuple() lowerCamelCase : Optional[int] = fx_model(**__A ).to_tuple() self.assertEqual(len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , 
pt_output[:, -1].numpy() , 4e-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(__A ) lowerCamelCase : Dict = pt_model_class.from_pretrained(__A , from_flax=__A ) with torch.no_grad(): lowerCamelCase : int = pt_model_loaded(**__A ).to_tuple() self.assertEqual( len(__A ) , len(__A ) , """Output lengths differ between Flax and PyTorch""" ) for fx_output, pt_output in zip(__A , __A ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 ) @tooslow def a__ ( self: Union[str, Any] )-> Tuple: for model_class_name in self.all_model_classes: lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained("""EleutherAI/gpt-j-6B""" ) lowerCamelCase : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(__A )
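The cache checks above all follow one pattern: score the whole sequence in one pass, then re-decode it token by token through past_key_values and demand the final logits agree. A sketch of that skeleton with a deliberately tiny, randomly initialized config (no checkpoint needed); it assumes the same Flax calling conventions the tests use, so treat it as illustrative rather than canonical.

import jax.numpy as jnp
import numpy as np
from transformers import GPTJConfig
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM

# Tiny random-weight model: the equivalence property does not need trained weights.
config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, rotary_dim=8, n_positions=64)
model = FlaxGPTJForCausalLM(config)

input_ids = jnp.array([[3, 7, 11, 2]], dtype='i4')
full_logits = model(input_ids).logits  # single pass over the whole sequence

# Re-decode the same tokens one at a time through the cache.
max_len = input_ids.shape[-1]
cache = model.init_cache(1, max_len)
attention_mask = jnp.ones((1, max_len), dtype='i4')
step_logits = None
for t in range(max_len):
    out = model(
        input_ids[:, t : t + 1],
        attention_mask=attention_mask,
        past_key_values=cache,
        position_ids=jnp.array([[t]], dtype='i4'),
    )
    cache, step_logits = out.past_key_values, out.logits

np.testing.assert_allclose(np.asarray(full_logits[:, -1]), np.asarray(step_logits[:, 0]), atol=1e-3)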
714
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Optional[int] = logging.get_logger(__name__) __lowerCamelCase :List[str] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class A__ ( __lowercase): """simple docstring""" snake_case__ : Optional[Any] ='''realm''' def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) # Common config lowerCamelCase : Optional[Any] = vocab_size lowerCamelCase : str = max_position_embeddings lowerCamelCase : Dict = hidden_size lowerCamelCase : Dict = retriever_proj_size lowerCamelCase : Optional[Any] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Tuple = num_candidates lowerCamelCase : int = intermediate_size lowerCamelCase : Dict = hidden_act lowerCamelCase : List[str] = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Optional[int] = initializer_range lowerCamelCase : Dict = type_vocab_size lowerCamelCase : Optional[Any] = layer_norm_eps # Reader config lowerCamelCase : List[str] = span_hidden_size lowerCamelCase : Dict = max_span_width lowerCamelCase : Optional[Any] = reader_layer_norm_eps lowerCamelCase : Optional[int] = reader_beam_size lowerCamelCase : List[Any] = reader_seq_len # Retrieval config lowerCamelCase : int = num_block_records lowerCamelCase : Dict = searcher_beam_size
42
0
"""simple docstring""" import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __lowerCamelCase :str = [ # tf -> hf ('''/''', '''.'''), ('''layer_''', '''layers.'''), ('''kernel''', '''weight'''), ('''beta''', '''bias'''), ('''gamma''', '''weight'''), ('''pegasus''', '''model'''), ] __lowerCamelCase :Optional[Any] = [ ('''.output.dense''', '''.fc2'''), ('''intermediate.LayerNorm''', '''final_layer_norm'''), ('''intermediate.dense''', '''fc1'''), ] __lowerCamelCase :Optional[Any] = ( INIT_COMMON + [ ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.out_proj'''), ('''attention.self''', '''self_attn'''), ('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''), ('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''), ('''attention.encdec''', '''encoder_attn'''), ('''key''', '''k_proj'''), ('''value''', '''v_proj'''), ('''query''', '''q_proj'''), ('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''), ] + END_COMMON ) __lowerCamelCase :Optional[Any] = ( INIT_COMMON + [ ('''embeddings.word_embeddings''', '''shared.weight'''), ('''embeddings.position_embeddings''', '''embed_positions.weight'''), ('''attention.self.LayerNorm''', '''self_attn_layer_norm'''), ('''attention.output.dense''', '''self_attn.output'''), ('''attention.self''', '''self_attn.self'''), ('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''), ] + END_COMMON ) __lowerCamelCase :Tuple = [ '''encdec/key/bias''', '''encdec/query/bias''', '''encdec/value/bias''', '''self/key/bias''', '''self/query/bias''', '''self/value/bias''', '''encdec_output/dense/bias''', '''attention/output/dense/bias''', ] def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] ) -> Optional[Any]: for tf_name, hf_name in patterns: lowerCamelCase : Optional[Any] = k.replace(_UpperCAmelCase , _UpperCAmelCase ) return k def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] ) -> List[Any]: lowerCamelCase : Optional[int] = BigBirdPegasusConfig(**_UpperCAmelCase ) lowerCamelCase : List[Any] = BigBirdPegasusForConditionalGeneration(_UpperCAmelCase ) lowerCamelCase : Any = torch_model.state_dict() lowerCamelCase : int = {} # separating decoder weights lowerCamelCase : Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} lowerCamelCase : int = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): lowerCamelCase : Any = [k.endswith(_UpperCAmelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCAmelCase ): continue lowerCamelCase : List[str] = DECODER_PATTERNS lowerCamelCase : Union[str, Any] = rename_state_dict_key(_UpperCAmelCase , _UpperCAmelCase ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowerCamelCase : Union[str, Any] = v.T lowerCamelCase : Tuple = torch.from_numpy(_UpperCAmelCase ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): lowerCamelCase : str = [k.endswith(_UpperCAmelCase ) for ending in KEYS_TO_IGNORE] if any(_UpperCAmelCase ): continue lowerCamelCase : str = REMAINING_PATTERNS lowerCamelCase : Union[str, Any] = rename_state_dict_key(_UpperCAmelCase , _UpperCAmelCase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): lowerCamelCase : int = v.T lowerCamelCase : List[str] = torch.from_numpy(_UpperCAmelCase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' lowerCamelCase : Union[str, Any] = mapping["""model.embed_positions.weight"""] lowerCamelCase : str = mapping.pop("""model.embed_positions.weight""" ) lowerCamelCase , lowerCamelCase : int = torch_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) lowerCamelCase : Optional[Any] = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def snake_case ( UpperCamelCase__ : List[Any] ) -> Dict: lowerCamelCase : List[str] = tf.train.list_variables(_UpperCAmelCase ) lowerCamelCase : Optional[Any] = {} lowerCamelCase : List[str] = ["""global_step"""] for name, shape in tqdm(_UpperCAmelCase , desc="""converting tf checkpoint to dict""" ): lowerCamelCase : Dict = any(pat in name for pat in ignore_name ) if skip_key: continue lowerCamelCase : Optional[int] = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase ) lowerCamelCase : int = array return tf_weights def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> Dict: lowerCamelCase : str = get_tf_weights_as_numpy(_UpperCAmelCase ) lowerCamelCase : Optional[Any] = convert_bigbird_pegasus(_UpperCAmelCase , _UpperCAmelCase ) torch_model.save_pretrained(_UpperCAmelCase ) if __name__ == "__main__": __lowerCamelCase :Optional[Any] = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') __lowerCamelCase :int = parser.parse_args() __lowerCamelCase :str = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
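The whole conversion hinges on ordered find-and-replace over parameter names. The idea in isolation, with an invented pattern table and key (not the real BigBird entries):

# Stand-alone illustration of the key-renaming step; the patterns and the
# example key below are made up for illustration only.
PATTERNS = [
    ('/', '.'),
    ('layer_', 'layers.'),
    ('kernel', 'weight'),
]

def rename_key(k: str, patterns: list[tuple[str, str]]) -> str:
    # Order matters: earlier rules may produce text that later rules match on.
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k

print(rename_key('encoder/layer_0/attention/kernel', PATTERNS))
# -> encoder.layers.0.attention.weight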
715
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Tuple = logging.get_logger(__name__) __lowerCamelCase :Any = { 'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json', # See all GLPN models at https://huggingface.co/models?filter=glpn } class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple ='''glpn''' def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict: super().__init__(**__a ) lowerCamelCase : Dict = num_channels lowerCamelCase : Any = num_encoder_blocks lowerCamelCase : Dict = depths lowerCamelCase : List[str] = sr_ratios lowerCamelCase : Dict = hidden_sizes lowerCamelCase : Tuple = patch_sizes lowerCamelCase : Optional[int] = strides lowerCamelCase : Optional[Any] = mlp_ratios lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : List[str] = hidden_act lowerCamelCase : Any = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : List[Any] = initializer_range lowerCamelCase : Dict = drop_path_rate lowerCamelCase : Any = layer_norm_eps lowerCamelCase : Optional[Any] = decoder_hidden_size lowerCamelCase : Tuple = max_depth lowerCamelCase : Optional[Any] = head_in_index
"""simple docstring""" import cva import numpy as np class A__ : """simple docstring""" def __init__( self: Tuple , __a: Dict , __a: int )-> int: if k in (0.04, 0.06): lowerCamelCase : List[str] = k lowerCamelCase : str = window_size else: raise ValueError("""invalid k value""" ) def __str__( self: List[str] )-> str: return str(self.k ) def a__ ( self: Optional[Any] , __a: Dict )-> tuple[cva.Mat, list[list[int]]]: lowerCamelCase : Any = cva.imread(__a , 0 ) lowerCamelCase : Optional[Any] = img.shape lowerCamelCase : list[list[int]] = [] lowerCamelCase : str = img.copy() lowerCamelCase : Optional[int] = cva.cvtColor(__a , cva.COLOR_GRAY2RGB ) lowerCamelCase : List[str] = np.gradient(__a ) lowerCamelCase : Optional[Any] = dx**2 lowerCamelCase : int = dy**2 lowerCamelCase : List[str] = dx * dy lowerCamelCase : Any = 0.04 lowerCamelCase : List[Any] = self.window_size // 2 for y in range(__a , h - offset ): for x in range(__a , w - offset ): lowerCamelCase : Union[str, Any] = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase : int = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase : List[Any] = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() lowerCamelCase : Dict = (wxx * wyy) - (wxy**2) lowerCamelCase : List[str] = wxx + wyy lowerCamelCase : Tuple = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": __lowerCamelCase :str = HarrisCorner(0.04, 3) __lowerCamelCase , __lowerCamelCase :int = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
"""simple docstring""" from __future__ import annotations import math def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float: lowerCamelCase : Dict = u for i in range(1 , UpperCamelCase__ ): lowerCamelCase : List[str] = temp * (u - i) return temp def snake_case ( ) -> None: lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) ) lowerCamelCase : list[list[float]] = [] for _ in range(UpperCamelCase__ ): y.append([] ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): y[i].append(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = 0 print("""enter the values of parameters in a list: """ ) lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(UpperCamelCase__ ): lowerCamelCase : int = float(input() ) lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) ) lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , UpperCamelCase__ ): for j in range(n - i ): lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1] lowerCamelCase : Any = y[0][0] for i in range(1 , UpperCamelCase__ ): summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ ) print(F'the value at {value} is {summ}' ) if __name__ == "__main__": main()
"""simple docstring""" def snake_case ( UpperCamelCase__ : str ) -> Any: if any(not isinstance(_lowercase , _lowercase ) or x < 0 for x in sequence ): raise TypeError("""Sequence must be list of non-negative integers""" ) for _ in range(len(_lowercase ) ): for i, (rod_upper, rod_lower) in enumerate(zip(_lowercase , sequence[1:] ) ): if rod_upper > rod_lower: sequence[i] -= rod_upper - rod_lower sequence[i + 1] += rod_upper - rod_lower return sequence if __name__ == "__main__": assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase :str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys __lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ) -> Optional[Any]: lowerCamelCase : List[str] = 0 lowerCamelCase : Tuple = len(_lowerCamelCase ) - 1 while left <= right: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowerCamelCase : str = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(_lowerCamelCase ): return None lowerCamelCase : List[Any] = sorted_collection[point] if current_item == item: return point else: if point < left: lowerCamelCase : List[Any] = left lowerCamelCase : Any = point elif point > right: lowerCamelCase : List[Any] = right lowerCamelCase : Tuple = point else: if item < current_item: lowerCamelCase : int = point - 1 else: lowerCamelCase : str = point + 1 return None def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ) -> Optional[int]: # avoid divided by 0 during interpolation if sorted_collection[left] == sorted_collection[right]: if sorted_collection[left] == item: return left else: return None lowerCamelCase : List[str] = left + ((item - sorted_collection[left]) * (right - left)) // ( sorted_collection[right] - sorted_collection[left] ) # out of range check if point < 0 or point >= len(_lowerCamelCase ): return None if sorted_collection[point] == item: return point elif point < left: return interpolation_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) elif point > right: return interpolation_search_by_recursion(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: if sorted_collection[point] > item: return interpolation_search_by_recursion( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , point - 1 ) else: return interpolation_search_by_recursion( _lowerCamelCase , _lowerCamelCase , point + 1 , _lowerCamelCase ) def snake_case ( UpperCamelCase__ : Optional[Any] ) -> List[Any]: if collection != sorted(_lowerCamelCase ): raise ValueError("""Collection must be ascending sorted""" ) return True if __name__ == "__main__": import sys __lowerCamelCase :str = 0 if debug == 1: __lowerCamelCase :Optional[Any] = [10, 30, 40, 45, 50, 66, 77, 93] try: __assert_sorted(collection) except ValueError: sys.exit('Sequence must be ascending sorted to apply interpolation search') __lowerCamelCase :Dict = 67 __lowerCamelCase :Dict = interpolation_search(collection, target) if result is not None: print(F"""{target} found at positions: {result}""") else: print('Not found')
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase :Dict = logging.get_logger() def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict: print(F'Converting {name}...' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ ) else: lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 192: lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 256: lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 384: lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ ) from_model.eval() lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval() lowerCamelCase : Tuple = OrderedDict() lowerCamelCase : Optional[Any] = from_model.state_dict() lowerCamelCase : str = list(from_model.state_dict().keys() ) lowerCamelCase : List[Any] = list(our_model.state_dict().keys() ) print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : str = weights[og_keys[i]] our_model.load_state_dict(UpperCamelCase__ ) lowerCamelCase : int = torch.randn((2, 3, 224, 224) ) lowerCamelCase : Any = from_model(UpperCamelCase__ ) lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one." 
lowerCamelCase : Dict = name print(UpperCamelCase__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowerCamelCase : Optional[int] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'Pushed {checkpoint_name}' ) def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]: lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : List[Any] = 1000 lowerCamelCase : Dict = (1, num_labels) lowerCamelCase : List[Any] = """huggingface/label-files""" lowerCamelCase : Optional[int] = num_labels lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase : List[Any] = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) lowerCamelCase : Optional[int] = { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } lowerCamelCase : List[Any] = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) __lowerCamelCase :List[Any] = parser.parse_args() __lowerCamelCase :Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring""" from pathlib import Path import cva import numpy as np from matplotlib import pyplot as plt def snake_case ( UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> np.ndarray: lowerCamelCase : Any = cva.getAffineTransform(lowercase_ , lowercase_ ) return cva.warpAffine(lowercase_ , lowercase_ , (rows, cols) ) if __name__ == "__main__": # read original image __lowerCamelCase :List[Any] = cva.imread( str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg') ) # turn image in gray scale value __lowerCamelCase :List[Any] = cva.cvtColor(image, cva.COLOR_BGR2GRAY) # get image shape __lowerCamelCase :Optional[Any] = gray_img.shape # set different points to rotate image __lowerCamelCase :str = np.array([[50, 50], [200, 50], [50, 200]], np.floataa) __lowerCamelCase :Dict = np.array([[10, 100], [200, 50], [100, 250]], np.floataa) __lowerCamelCase :Optional[int] = np.array([[50, 50], [150, 50], [120, 200]], np.floataa) __lowerCamelCase :Optional[int] = np.array([[10, 100], [80, 50], [180, 250]], np.floataa) # add all rotated images in a list __lowerCamelCase :Dict = [ gray_img, get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols), ] # plot different image rotations __lowerCamelCase :Union[str, Any] = plt.figure(1) __lowerCamelCase :Union[str, Any] = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3'] for i, image in enumerate(images): plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray') plt.title(titles[i]) plt.axis('off') plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95) plt.show()
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple =(KDPMaDiscreteScheduler,) snake_case__ : Tuple =10 def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]: lowerCamelCase : int = { """num_train_timesteps""": 1_100, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__a ) return config def a__ ( self: Union[str, Any] )-> Any: for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__a ) def a__ ( self: str )-> int: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def a__ ( self: int )-> Union[str, Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a ) def a__ ( self: List[Any] )-> List[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def a__ ( self: Union[str, Any] )-> int: lowerCamelCase : List[str] = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase : List[str] = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : Dict = self.dummy_model() lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : List[Any] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : Optional[Any] = output.prev_sample lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2 assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2 assert abs(result_mean.item() - 0.00_02 ) < 1e-3 def a__ ( self: Any )-> Any: if torch_device == "mps": return lowerCamelCase : Dict = self.scheduler_classes[0] lowerCamelCase : Dict = self.get_scheduler_config() lowerCamelCase : int = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : List[Any] = self.dummy_model() lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : Optional[int] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[Any] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : str = output.prev_sample lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 def a__ ( self: Optional[Any] )-> List[Any]: if torch_device == "mps": return lowerCamelCase : Any = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config() lowerCamelCase : Optional[Any] = 
scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) lowerCamelCase : Union[str, Any] = self.dummy_model() lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : int = scheduler.step(__a , __a , __a ) lowerCamelCase : int = output.prev_sample lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) ) lowerCamelCase : int = torch.mean(torch.abs(__a ) ) if str(__a ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: __lowerCamelCase :Any = None __lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase :str = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} __lowerCamelCase :List[str] = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', }, 'tokenizer_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json', }, } __lowerCamelCase :List[str] = { 'camembert-base': 512, } __lowerCamelCase :Tuple = '▁' class A__ ( __SCREAMING_SNAKE_CASE): """simple docstring""" snake_case__ : int =VOCAB_FILES_NAMES snake_case__ : Union[str, Any] =PRETRAINED_VOCAB_FILES_MAP snake_case__ : Optional[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : Dict =['input_ids', 'attention_mask'] snake_case__ : int =CamembertTokenizer def __init__( self: List[str] , __a: Optional[Any]=None , __a: Any=None , __a: Optional[Any]="<s>" , __a: Tuple="</s>" , __a: int="</s>" , __a: Union[str, Any]="<s>" , __a: Tuple="<unk>" , __a: Union[str, Any]="<pad>" , __a: List[Any]="<mask>" , __a: Tuple=["<s>NOTUSED", "</s>NOTUSED"] , **__a: str , )-> Optional[int]: # Mask token behave like a normal word, i.e. include the space before it lowerCamelCase : Dict = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token super().__init__( _a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , additional_special_tokens=_a , **_a , ) lowerCamelCase : int = vocab_file lowerCamelCase : int = False if not self.vocab_file else True def a__ ( self: Optional[int] , __a: Tuple , __a: Tuple = None )-> int: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowerCamelCase : str = [self.cls_token_id] lowerCamelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self: Tuple , __a: int , __a: str = None )-> int: lowerCamelCase : Tuple = [self.sep_token_id] lowerCamelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self: Union[str, Any] , __a: str , __a: List[str] = None )-> Union[str, Any]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase : Tuple = os.path.join( _a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ): copyfile(self.vocab_file , _a ) return (out_vocab_file,)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =StableDiffusionXLImgaImgPipeline snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS def a__ ( self: List[str] )-> int: torch.manual_seed(0 ) lowerCamelCase : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) lowerCamelCase : Any = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) lowerCamelCase : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) lowerCamelCase : Dict = CLIPTextModel(__a ) lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : Dict = CLIPTextModelWithProjection(__a ) lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]: lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) lowerCamelCase : Any = image / 2 + 0.5 if str(__a ).startswith("""mps""" ): lowerCamelCase : Dict = torch.manual_seed(__a ) else: lowerCamelCase : Tuple = torch.Generator(device=__a 
).manual_seed(__a ) lowerCamelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase : Union[str, Any] = self.get_dummy_components() lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : int = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a ) lowerCamelCase : Optional[int] = sd_pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a__ ( self: Optional[int] )-> Union[str, Any]: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def a__ ( self: Optional[Any] )-> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def a__ ( self: List[str] )-> Optional[Any]: pass def a__ ( self: List[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = self.get_dummy_components() lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : str = sd_pipe.to(__a ) lowerCamelCase : Any = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) # forward without prompt embeds lowerCamelCase : Dict = self.get_dummy_inputs(__a ) lowerCamelCase : Any = 3 * ["""this is a negative prompt"""] lowerCamelCase : Optional[int] = negative_prompt lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]] lowerCamelCase : List[Any] = sd_pipe(**__a ) lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCamelCase : Tuple = self.get_dummy_inputs(__a ) lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""] lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )] ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a ) lowerCamelCase : int = sd_pipe( **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , ) lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Dict )-> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]: lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) ) lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a ) lowerCamelCase : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self: Optional[int] )-> List[str]: lowerCamelCase : Tuple = 
DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[int] = self.get_inputs(__a ) lowerCamelCase : Optional[Any] = pipe(**__a ).images lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, is_vision_available, ) __lowerCamelCase :Union[str, Any] = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :str = ["""ViTFeatureExtractor"""] __lowerCamelCase :Dict = ["""ViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :str = [ """VIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTForImageClassification""", """ViTForMaskedImageModeling""", """ViTModel""", """ViTPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Dict = [ """TFViTForImageClassification""", """TFViTModel""", """TFViTPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :List[str] = [ """FlaxViTForImageClassification""", """FlaxViTModel""", """FlaxViTPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_vit import ViTFeatureExtractor from .image_processing_vit import ViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit import ( VIT_PRETRAINED_MODEL_ARCHIVE_LIST, ViTForImageClassification, ViTForMaskedImageModeling, ViTModel, ViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel else: import sys __lowerCamelCase :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]: return None class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple: return None class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =[ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def a__ ( self: Optional[Any] )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """tf""" , 12 , **__a ) @require_torch @slow def a__ ( self: str )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """pt""" , 12 , **__a ) @require_torch @slow def a__ ( self: Union[str, Any] )-> Dict: from transformers import BertModel lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__a ) ) vocab_file.flush() lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , """pt""" , 12 , __a ) @require_tf @slow def a__ ( self: Optional[Any] )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a ) lowerCamelCase : Tuple = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def a__ ( self: Any )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a ) lowerCamelCase : Dict = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any: try: # Compute path with TemporaryDirectory() as tempdir: lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def a__ ( self: Tuple )-> Dict: from transformers import BertModel lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a 
, """pt""" ) @require_tf @require_tokenizers @slow def a__ ( self: Optional[Any] )-> List[Any]: from transformers import TFBertModel lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a , """tf""" ) def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]: lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a ) lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} ) def a__ ( self: List[Any] )-> int: lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] , """input_ids""" ) def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
"""simple docstring""" from urllib.parse import quote import pytest from datasets.utils.hub import hf_hub_url @pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] ) @pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] ) @pytest.mark.parametrize("""revision""" , [None, """v2"""] ) def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> Union[str, Any]: lowerCamelCase : Optional[int] = hf_hub_url(repo_id=UpperCamelCase__ , path=UpperCamelCase__ , revision=UpperCamelCase__ ) assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(UpperCamelCase__ )}'
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Optional[int] )-> Union[str, Any]: lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60] lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12] lowerCamelCase : Union[str, Any] = 100 self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 ) def a__ ( self: str )-> str: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: str )-> List[Any]: self.assertRaisesRegex(__a , """Weight can not be negative.""" ) def a__ ( self: Any )-> Dict: self.assertRaisesRegex(__a , """Profit can not be negative.""" ) def a__ ( self: Optional[Any] )-> List[Any]: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: Optional[Any] )-> Tuple: self.assertRaisesRegex( __a , """The length of profit and weight must be same.""" ) if __name__ == "__main__": unittest.main()
"""simple docstring""" def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> List[Any]: if n == 0: return 1 elif n % 2 == 1: return (binary_exponentiation(SCREAMING_SNAKE_CASE_ , n - 1 , SCREAMING_SNAKE_CASE_ ) * a) % mod else: lowerCamelCase : List[str] = binary_exponentiation(SCREAMING_SNAKE_CASE_ , n / 2 , SCREAMING_SNAKE_CASE_ ) return (b * b) % mod # a prime number __lowerCamelCase :Tuple = 701 __lowerCamelCase :str = 1_000_000_000 __lowerCamelCase :Any = 10 # using binary exponentiation function, O(log(p)): print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p) print((a / b) % p == (a * b ** (p - 2)) % p)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase :List[str] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor'] __lowerCamelCase :List[str] = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :List[str] = logging.get_logger(__name__) __lowerCamelCase :str = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'} class A__ ( __lowercase): """simple docstring""" snake_case__ : Dict ='''openai-gpt''' snake_case__ : Optional[Any] ={ '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self: str , __a: List[str]=40_478 , __a: Union[str, Any]=512 , __a: List[str]=768 , __a: str=12 , __a: Dict=12 , __a: str="gelu" , __a: Dict=0.1 , __a: Any=0.1 , __a: Optional[int]=0.1 , __a: str=1e-5 , __a: Dict=0.02 , __a: Any="cls_index" , __a: Tuple=True , __a: Union[str, Any]=None , __a: Union[str, Any]=True , __a: Any=0.1 , **__a: int , )-> Tuple: lowerCamelCase : Union[str, Any] = vocab_size lowerCamelCase : str = n_positions lowerCamelCase : str = n_embd lowerCamelCase : Dict = n_layer lowerCamelCase : Any = n_head lowerCamelCase : int = afn lowerCamelCase : Dict = resid_pdrop lowerCamelCase : Union[str, Any] = embd_pdrop lowerCamelCase : int = attn_pdrop lowerCamelCase : List[Any] = layer_norm_epsilon lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : Optional[Any] = summary_type lowerCamelCase : int = summary_use_proj lowerCamelCase : Tuple = summary_activation lowerCamelCase : Union[str, Any] = summary_first_dropout lowerCamelCase : str = summary_proj_to_labels super().__init__(**__a )
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict: lowerCamelCase : Dict = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = image_size lowerCamelCase : Optional[int] = patch_size lowerCamelCase : Any = num_channels lowerCamelCase : Any = embed_dim lowerCamelCase : Dict = hidden_sizes lowerCamelCase : List[Any] = depths lowerCamelCase : Tuple = num_heads lowerCamelCase : List[Any] = window_size lowerCamelCase : str = mlp_ratio lowerCamelCase : str = qkv_bias lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Tuple = drop_path_rate lowerCamelCase : Dict = hidden_act lowerCamelCase : Tuple = use_absolute_embeddings lowerCamelCase : List[str] = patch_norm lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : str = initializer_range lowerCamelCase : Tuple = is_training lowerCamelCase : int = scope lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : List[str] = type_sequence_label_size lowerCamelCase : str = encoder_stride lowerCamelCase : List[str] = out_features lowerCamelCase : Optional[int] = out_indices def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : str = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = self.get_config() return config, pixel_values, labels def a__ ( self: List[Any] )-> Optional[int]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Tuple = model(__a ) lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int: lowerCamelCase : List[Any] = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase : Dict = None lowerCamelCase : Dict = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a ) model.to(__a ) model.eval() lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str: lowerCamelCase : Optional[Any] = self.type_sequence_label_size lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : int = 1 lowerCamelCase : List[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self: int )-> Optional[int]: 
lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) snake_case__ : Optional[int] =( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) snake_case__ : Tuple =False snake_case__ : Dict =False snake_case__ : Dict =False snake_case__ : Tuple =False snake_case__ : Optional[int] =False def a__ ( self: Union[str, Any] )-> Optional[int]: lowerCamelCase : List[str] = FocalNetModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a ) def a__ ( self: List[str] )-> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: List[str] )-> Union[str, Any]: return def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[Any] )-> Dict: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: List[Any] )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def a__ ( self: Optional[Any] )-> str: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def a__ ( self: Optional[Any] )-> Dict: pass def a__ ( self: Optional[Any] )-> Dict: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : Any = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : int = model_class(__a ) lowerCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Any = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]: lowerCamelCase : List[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase 
: List[str] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : List[str] = outputs.hidden_states lowerCamelCase : Tuple = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # FocalNet has a different seq_length lowerCamelCase : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape lowerCamelCase : Tuple = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self: Any )-> Any: lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase : List[str] = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : List[Any] = True self.check_hidden_states_output(__a , __a , __a , __a ) def a__ ( self: str )-> Union[str, Any]: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = 3 lowerCamelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase : str = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Union[str, Any] = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) @slow def a__ ( self: Optional[int] )-> List[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> Any: lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = _config_zero_init(__a ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems 
not properly initialized' , ) @require_vision @require_torch class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Optional[int] )-> Optional[Any]: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else () snake_case__ : Optional[int] =FocalNetConfig snake_case__ : str =False def a__ ( self: Union[str, Any] )-> Tuple: lowerCamelCase : str = FocalNetModelTester(self )
42
0
"""simple docstring""" import os import re import warnings from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_ta import TaTokenizer else: __lowerCamelCase :List[str] = None __lowerCamelCase :Tuple = logging.get_logger(__name__) __lowerCamelCase :List[str] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} __lowerCamelCase :List[str] = { 'vocab_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model', 't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model', 't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model', }, 'tokenizer_file': { 't5-small': 'https://huggingface.co/t5-small/resolve/main/tokenizer.json', 't5-base': 'https://huggingface.co/t5-base/resolve/main/tokenizer.json', 't5-large': 'https://huggingface.co/t5-large/resolve/main/tokenizer.json', 't5-3b': 'https://huggingface.co/t5-3b/resolve/main/tokenizer.json', 't5-11b': 'https://huggingface.co/t5-11b/resolve/main/tokenizer.json', }, } # TODO(PVP) - this should be removed in Transformers v5 __lowerCamelCase :Any = { 't5-small': 512, 't5-base': 512, 't5-large': 512, 't5-3b': 512, 't5-11b': 512, } class A__ ( __A): """simple docstring""" snake_case__ : Optional[Any] =VOCAB_FILES_NAMES snake_case__ : Optional[int] =PRETRAINED_VOCAB_FILES_MAP snake_case__ : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ : List[str] =['''input_ids''', '''attention_mask'''] snake_case__ : str =TaTokenizer snake_case__ : Tuple =[] def __init__( self: Tuple , __a: str=None , __a: Union[str, Any]=None , __a: Optional[int]="</s>" , __a: Dict="<unk>" , __a: Dict="<pad>" , __a: Union[str, Any]=100 , __a: List[str]=None , **__a: int , )-> Any: if extra_ids > 0 and additional_special_tokens is None: lowerCamelCase : Tuple = [f'<extra_id_{i}>' for i in range(__a )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra special tokens lowerCamelCase : Dict = len(set(filter(lambda __a : bool("""extra_id_""" in str(__a ) ) , __a ) ) ) if extra_tokens != extra_ids: raise ValueError( f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are' """ provided to T5Tokenizer. 
In this case the additional_special_tokens must include the extra_ids""" """ tokens""" ) super().__init__( __a , tokenizer_file=__a , eos_token=__a , unk_token=__a , pad_token=__a , extra_ids=__a , additional_special_tokens=__a , **__a , ) lowerCamelCase : List[str] = vocab_file lowerCamelCase : List[Any] = False if not self.vocab_file else True lowerCamelCase : Optional[int] = extra_ids @staticmethod def a__ ( __a: str , __a: Tuple , __a: Dict )-> Union[str, Any]: if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes: lowerCamelCase : Optional[Any] = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path] if init_max_model_length is not None and init_max_model_length != max_model_length: return init_max_model_length elif init_max_model_length is None: warnings.warn( """This tokenizer was incorrectly instantiated with a model max length of""" f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this' """ behavior is kept to avoid breaking backwards compatibility when padding/encoding with""" """ `truncation is True`.\n- Be aware that you SHOULD NOT rely on""" f' {pretrained_model_name_or_path} automatically truncating your input to' f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences' f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with' """ `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please""" """ instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __a , ) return max_model_length def a__ ( self: Optional[Any] , __a: List[str] , __a: Union[str, Any] = None )-> List[str]: if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCamelCase : Union[str, Any] = os.path.join( __a , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__a ): copyfile(self.vocab_file , __a ) logger.info(f'Copy vocab file to {out_vocab_file}' ) return (out_vocab_file,) def a__ ( self: int , __a: Optional[int] , __a: Optional[int] = None )-> Dict: lowerCamelCase : int = token_ids_a + [self.eos_token_id] if token_ids_a is None: return self.prefix_tokens + token_ids_a else: lowerCamelCase : Tuple = token_ids_a + [self.eos_token_id] return self.prefix_tokens + token_ids_a + token_ids_a def a__ ( self: Optional[Any] , __a: Optional[int] , __a: Dict = None )-> List[Any]: lowerCamelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def a__ ( self: Optional[Any] )-> List[Any]: return list( set(filter(lambda __a : bool(re.search(r"""<extra_id_\d+>""" , __a ) ) is not None , self.additional_special_tokens ) ) ) def a__ ( self: List[Any] )-> str: return [self.convert_tokens_to_ids(__a ) for token in self.get_sentinel_tokens()]
703
"""simple docstring""" import os def snake_case ( ) -> Optional[Any]: with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f: lowerCamelCase : int = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase__ ) for x in f.readline().split()] ) lowerCamelCase : Union[str, Any] = 0 # right for i in range(20 ): for j in range(17 ): lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowerCamelCase : Tuple = temp # down for i in range(17 ): for j in range(20 ): lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowerCamelCase : Optional[Any] = temp # diagonal 1 for i in range(17 ): for j in range(17 ): lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowerCamelCase : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowerCamelCase : List[Any] = temp return maximum if __name__ == "__main__": print(solution())
42
0
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :Any = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Union[str, Any] =DebertaVaTokenizer snake_case__ : Union[str, Any] =DebertaVaTokenizerFast snake_case__ : int =True snake_case__ : Optional[Any] =True def a__ ( self: Optional[int] )-> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : Tuple = DebertaVaTokenizer(_A , unk_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: List[Any] , __a: Dict )-> Optional[Any]: lowerCamelCase : Any = """this is a test""" lowerCamelCase : int = """this is a test""" return input_text, output_text def a__ ( self: Optional[Any] )-> str: lowerCamelCase : Tuple = """<pad>""" lowerCamelCase : List[str] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A ) def a__ ( self: Tuple )-> int: lowerCamelCase : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """[PAD]""" ) self.assertEqual(len(_A ) , 30_001 ) def a__ ( self: Dict )-> Dict: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def a__ ( self: Optional[Any] )-> Optional[Any]: # fmt: off lowerCamelCase : Tuple = """ \tHeLLo!how \n Are yoU? """ lowerCamelCase : Any = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""] # fmt: on lowerCamelCase : Tuple = DebertaVaTokenizer(_A , do_lower_case=_A ) lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : List[str] = DebertaVaTokenizerFast(_A , do_lower_case=_A ) lowerCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def a__ ( self: Any )-> Any: pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def a__ ( self: Any )-> List[str]: pass def a__ ( self: Tuple )-> str: # fmt: off lowerCamelCase : List[str] = """I was born in 92000, and this is falsé.""" lowerCamelCase : List[Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCamelCase : List[str] = DebertaVaTokenizer(_A , split_by_punct=_A ) lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : List[str] = DebertaVaTokenizerFast(_A , split_by_punct=_A ) lowerCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def a__ ( self: List[str] )-> List[Any]: # fmt: off lowerCamelCase : Any = """I was born in 92000, and this is falsé.""" lowerCamelCase : Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", 
"""2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCamelCase : Optional[Any] = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : Tuple = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def a__ ( self: Optional[Any] )-> str: # fmt: off lowerCamelCase : List[Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : Dict = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCamelCase : List[str] = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : Optional[int] = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def a__ ( self: int )-> int: # fmt: off lowerCamelCase : Any = """I was born in 92000, and this is falsé.""" lowerCamelCase : Any = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on lowerCamelCase : int = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : Union[str, Any] = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def a__ ( self: str )-> int: # fmt: off lowerCamelCase : Union[str, Any] = """ \tHeLLo!how \n Are yoU? 
""" lowerCamelCase : Optional[int] = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""] # fmt: on lowerCamelCase : str = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : str = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A ) lowerCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) def a__ ( self: Optional[Any] )-> Tuple: lowerCamelCase : Union[str, Any] = self.get_tokenizer() lowerCamelCase : List[Any] = self.get_rust_tokenizer() lowerCamelCase : int = """I was born in 92000, and this is falsé.""" lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) ) lowerCamelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) ) self.assertListEqual(_A , _A ) lowerCamelCase : str = tokenizer.encode(_A , add_special_tokens=_A ) lowerCamelCase : Optional[int] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowerCamelCase : Dict = self.get_rust_tokenizer() lowerCamelCase : Any = tokenizer.encode(_A ) lowerCamelCase : int = rust_tokenizer.encode(_A ) self.assertListEqual(_A , _A ) def a__ ( self: str )-> List[Any]: lowerCamelCase : Optional[int] = """This is a test""" lowerCamelCase : Tuple = [13, 1, 4_398, 25, 21, 1_289] lowerCamelCase : Dict = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""] lowerCamelCase : Tuple = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""] lowerCamelCase : Optional[Any] = DebertaVaTokenizer(_A , keep_accents=_A ) lowerCamelCase : List[Any] = DebertaVaTokenizerFast(_A , keep_accents=_A ) lowerCamelCase : str = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowerCamelCase : List[str] = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) lowerCamelCase : Optional[int] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowerCamelCase : str = rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowerCamelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) # fmt: off lowerCamelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : Optional[Any] = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] lowerCamelCase : Optional[Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ] lowerCamelCase : Optional[int] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on lowerCamelCase : Tuple = tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowerCamelCase : List[Any] = tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowerCamelCase : int = tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) lowerCamelCase : List[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A ) self.assertListEqual(_A , _A ) lowerCamelCase : Union[str, Any] = 
rust_tokenizer.tokenize(_A ) self.assertListEqual(_A , _A ) lowerCamelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(_A ) self.assertListEqual(_A , _A ) def a__ ( self: List[str] )-> Any: lowerCamelCase : str = DebertaVaTokenizer(_A ) lowerCamelCase : Any = tokenizer.encode("""sequence builders""" ) lowerCamelCase : str = tokenizer.encode("""multi-sequence build""" ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(_A ) lowerCamelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(_A , _A ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , ) @slow def a__ ( self: List[str] )-> int: # fmt: off lowerCamelCase : Any = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=_A , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
704
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Any = False @skip_mps class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline snake_case__ : Any =False snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Dict )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Union[str, Any] )-> Any: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: Tuple )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : Optional[int] = CLIPTextModel(__a ) lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]: if str(__a ).startswith("""mps""" ): lowerCamelCase : Tuple = torch.manual_seed(__a ) else: lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : Dict = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: Dict )-> str: lowerCamelCase : Tuple = """cpu""" lowerCamelCase : List[str] = self.get_dummy_components() lowerCamelCase : List[Any] = self.pipeline_class(**__a 
) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Any = self.get_dummy_inputs(__a ) lowerCamelCase : Union[str, Any] = pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def a__ ( self: int )-> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Union[str, Any] )-> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> int: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: Dict )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: Optional[int] )-> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Tuple: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: str )-> str: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: Any )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Dict )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: int )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = torch.manual_seed(51 ) lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : Dict = """a painting of an elephant with glasses""" lowerCamelCase : Any = [5, 7] lowerCamelCase : Tuple = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
42
0
"""simple docstring""" from math import loga def snake_case ( UpperCamelCase__ : Optional[int] ) -> List[str]: if a < 0: raise ValueError("""Input value must be a positive integer""" ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("""Input value must be a 'int' type""" ) return 0 if (a == 0) else int(loga(a & -a ) ) if __name__ == "__main__": import doctest doctest.testmod()
705
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A__ : """simple docstring""" def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : Any = seq_length lowerCamelCase : Any = is_training lowerCamelCase : Tuple = use_input_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : List[str] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Union[str, Any] = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : Any = max_position_embeddings lowerCamelCase : str = type_vocab_size lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : Union[str, Any] = num_labels lowerCamelCase : Optional[Any] = num_choices lowerCamelCase : Any = scope def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Dict = None if self.use_input_mask: lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Any = None lowerCamelCase : int = None lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self: Tuple )-> Union[str, Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , 
__a: str )-> int: lowerCamelCase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a ) lowerCamelCase : str = model(__a ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int: lowerCamelCase : str = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]: lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Dict = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Tuple = config_and_inputs lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Any =False snake_case__ : Dict =( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Dict =() snake_case__ : Optional[int] =( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Any =True def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Optional[Any] = EsmModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 ) def a__ ( self: List[Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: Tuple )-> Any: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase : Tuple = type self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def a__ ( self: Any )-> List[Any]: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> List[str]: lowerCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a ) lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase : Union[str, Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Any = EsmEmbeddings(config=__a ) lowerCamelCase : Dict = torch.empty(2 , 4 , 30 ) lowerCamelCase : List[Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Any )-> Optional[Any]: pass @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Dict )-> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: List[str] )-> Dict: pass @require_torch class A__ ( __lowercase): """simple docstring""" @slow def a__ ( self: Any )-> Union[str, Any]: with torch.no_grad(): lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Tuple = model(__a )[0] lowerCamelCase : Dict = 33 lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) lowerCamelCase : Tuple = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Dict )-> str: with torch.no_grad(): lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Any = model(__a )[0] # compare the actual values for a slice. lowerCamelCase : Tuple = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
42
0
"""simple docstring""" import numpy as np def snake_case ( UpperCamelCase__ : Any ) -> int: return 1 / (1 + np.exp(-vector )) def snake_case ( UpperCamelCase__ : Optional[int] ) -> Union[str, Any]: return vector * sigmoid(lowerCAmelCase_ ) if __name__ == "__main__": import doctest doctest.testmod()
706
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :str = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =AlbertTokenizer snake_case__ : Optional[Any] =AlbertTokenizerFast snake_case__ : Optional[int] =True snake_case__ : Any =True snake_case__ : Optional[int] =True def a__ ( self: Dict )-> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : int = AlbertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]: lowerCamelCase : List[str] = """this is a test""" lowerCamelCase : int = """this is a test""" return input_text, output_text def a__ ( self: Any )-> List[Any]: lowerCamelCase : int = """<pad>""" lowerCamelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def a__ ( self: Tuple )-> str: lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(__a ) , 30_000 ) def a__ ( self: List[str] )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def a__ ( self: Optional[Any] )-> Union[str, Any]: if not self.test_rust_tokenizer: return lowerCamelCase : str = self.get_tokenizer() lowerCamelCase : Tuple = self.get_rust_tokenizer() lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : List[str] = tokenizer.tokenize(__a ) lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Any = self.get_rust_tokenizer() lowerCamelCase : List[str] = tokenizer.encode(__a ) lowerCamelCase : str = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def a__ ( self: Tuple )-> List[Any]: lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a ) lowerCamelCase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] ) lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def a__ ( self: Tuple )-> str: lowerCamelCase : str = AlbertTokenizer(__a ) lowerCamelCase : Union[str, Any] = 
tokenizer.encode("""sequence builders""" ) lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" ) lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def a__ ( self: Any )-> Dict: # fmt: off lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
42
0
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase :Union[str, Any] = logging.get_logger(__name__) def snake_case ( UpperCamelCase__ : Optional[int] ) -> Any: lowerCamelCase : Any = OrderedDict() for key, value in state_dict.items(): if key.startswith("""module.encoder""" ): lowerCamelCase : List[Any] = key.replace("""module.encoder""" , """glpn.encoder""" ) if key.startswith("""module.decoder""" ): lowerCamelCase : str = key.replace("""module.decoder""" , """decoder.stages""" ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowerCamelCase : Union[str, Any] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )] lowerCamelCase : Optional[Any] = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(_snake_case )-1}' ) if "norm" in key: lowerCamelCase : str = key.replace("""norm""" , """layer_norm""" ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowerCamelCase : Any = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )] lowerCamelCase : List[Any] = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(_snake_case )-1}' ) if "layer_norm1" in key: lowerCamelCase : Tuple = key.replace("""layer_norm1""" , """layer_norm_1""" ) if "layer_norm2" in key: lowerCamelCase : List[Any] = key.replace("""layer_norm2""" , """layer_norm_2""" ) if "block" in key: # replace for example block1 by block.0 lowerCamelCase : Union[str, Any] = key[key.find("""block""" ) + len("""block""" )] lowerCamelCase : Dict = key.replace(F'block{idx}' , F'block.{int(_snake_case )-1}' ) if "attn.q" in key: lowerCamelCase : Any = key.replace("""attn.q""" , """attention.self.query""" ) if "attn.proj" in key: lowerCamelCase : Tuple = key.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in key: lowerCamelCase : List[str] = key.replace("""attn""" , """attention.self""" ) if "fc1" in key: lowerCamelCase : Dict = key.replace("""fc1""" , """dense1""" ) if "fc2" in key: lowerCamelCase : List[Any] = key.replace("""fc2""" , """dense2""" ) if "linear_pred" in key: lowerCamelCase : Any = key.replace("""linear_pred""" , """classifier""" ) if "linear_fuse" in key: lowerCamelCase : Optional[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" ) lowerCamelCase : List[str] = key.replace("""linear_fuse.bn""" , """batch_norm""" ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowerCamelCase : Optional[Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )] lowerCamelCase : Optional[int] = key.replace(F'linear_c{idx}' , F'linear_c.{int(_snake_case )-1}' ) if "bot_conv" in key: lowerCamelCase : Dict = key.replace("""bot_conv""" , """0.convolution""" ) if "skip_conv1" in key: lowerCamelCase : List[str] = key.replace("""skip_conv1""" , """1.convolution""" ) if "skip_conv2" in key: lowerCamelCase : Optional[int] = key.replace("""skip_conv2""" , """2.convolution""" ) if "fusion1" in key: lowerCamelCase : Any = key.replace("""fusion1""" , """1.fusion""" ) if "fusion2" in key: lowerCamelCase : Optional[Any] = key.replace("""fusion2""" , """2.fusion""" ) if "fusion3" in key: lowerCamelCase : Dict = key.replace("""fusion3""" , """3.fusion""" ) if "fusion" in key and "conv" in key: lowerCamelCase : List[Any] = key.replace("""conv""" , 
"""convolutional_layer""" ) if key.startswith("""module.last_layer_depth""" ): lowerCamelCase : Optional[Any] = key.replace("""module.last_layer_depth""" , """head.head""" ) lowerCamelCase : Union[str, Any] = value return new_state_dict def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple ) -> Tuple: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowerCamelCase : Optional[Any] = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' ) lowerCamelCase : Dict = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' ) # next, add keys and values (in that order) to the state dict lowerCamelCase : Optional[int] = kv_weight[ : config.hidden_sizes[i], : ] lowerCamelCase : Optional[Any] = kv_bias[: config.hidden_sizes[i]] lowerCamelCase : str = kv_weight[ config.hidden_sizes[i] :, : ] lowerCamelCase : Optional[int] = kv_bias[config.hidden_sizes[i] :] def snake_case ( ) -> List[Any]: lowerCamelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCamelCase : Tuple = Image.open(requests.get(_snake_case , stream=_snake_case ).raw ) return image @torch.no_grad() def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : str=None ) -> Any: lowerCamelCase : Dict = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowerCamelCase : str = GLPNImageProcessor() # prepare image lowerCamelCase : Optional[Any] = prepare_img() lowerCamelCase : int = image_processor(images=_snake_case , return_tensors="""pt""" ).pixel_values logger.info("""Converting model...""" ) # load original state dict lowerCamelCase : Optional[Any] = torch.load(_snake_case , map_location=torch.device("""cpu""" ) ) # rename keys lowerCamelCase : List[str] = rename_keys(_snake_case ) # key and value matrices need special treatment read_in_k_v(_snake_case , _snake_case ) # create HuggingFace model and load state dict lowerCamelCase : Optional[int] = GLPNForDepthEstimation(_snake_case ) model.load_state_dict(_snake_case ) model.eval() # forward pass lowerCamelCase : Tuple = model(_snake_case ) lowerCamelCase : Tuple = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowerCamelCase : int = torch.tensor( [[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] ) elif "kitti" in model_name: lowerCamelCase : List[Any] = torch.tensor( [[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] ) else: raise ValueError(F'Unknown model name: {model_name}' ) lowerCamelCase : List[Any] = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _snake_case , atol=1E-4 ) print("""Looks ok!""" ) # finally, push to hub if required if push_to_hub: logger.info("""Pushing model and image processor to the hub...""" ) model.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=_snake_case , ) image_processor.push_to_hub( repo_path_or_name=Path(_snake_case , _snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=_snake_case , ) if __name__ == "__main__": 
__lowerCamelCase :Any = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __lowerCamelCase :List[Any] = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
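For reference, a hypothetical direct invocation of the converter, mirroring the positional argparse call in `__main__` above; both paths are placeholders:

convert_glpn_checkpoint(
    "./glpn_kitti.pth",  # checkpoint_path: original .pth weights
    "./glpn-kitti",      # pytorch_dump_folder_path: output directory
    False,               # push_to_hub
    "glpn-kitti",        # model_name, used to pick the expected output slice
)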
707
"""simple docstring""" __lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : Tuple = True lowerCamelCase : Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) order.append(UpperCamelCase__ ) return order def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : List[Any] = True lowerCamelCase : int = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return component def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]: lowerCamelCase : int = len(UpperCamelCase__ ) * [False] lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(UpperCamelCase__ ) lowerCamelCase : int = [] for i, was_visited in enumerate(UpperCamelCase__ ): if not was_visited: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Tuple = [] lowerCamelCase : str = len(UpperCamelCase__ ) * [False] for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1] if not visited[vert]: lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) components_list.append(UpperCamelCase__ ) return components_list
42
0
"""simple docstring""" import pytest import datasets.config from datasets.utils.info_utils import is_small_dataset @pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] ) @pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] ) def lowerCAmelCase ( UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple ) -> Optional[int]: if input_in_memory_max_size != "default": monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , _A ) lowerCamelCase : Any = datasets.config.IN_MEMORY_MAX_SIZE if input_in_memory_max_size == "default": assert in_memory_max_size == 0 else: assert in_memory_max_size == input_in_memory_max_size if dataset_size and in_memory_max_size: lowerCamelCase : List[str] = dataset_size < in_memory_max_size else: lowerCamelCase : Tuple = False lowerCamelCase : Optional[int] = is_small_dataset(_A ) assert result == expected
708
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :str = logging.get_logger(__name__) __lowerCamelCase :Any = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( __lowercase): """simple docstring""" snake_case__ : List[Any] ='''time_series_transformer''' snake_case__ : List[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any: # time series specific configuration lowerCamelCase : str = prediction_length lowerCamelCase : Optional[Any] = context_length or prediction_length lowerCamelCase : Tuple = distribution_output lowerCamelCase : Any = loss lowerCamelCase : List[Any] = input_size lowerCamelCase : int = num_time_features lowerCamelCase : Dict = lags_sequence lowerCamelCase : Optional[int] = scaling lowerCamelCase : int = num_dynamic_real_features lowerCamelCase : Tuple = num_static_real_features lowerCamelCase : Any = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : int = cardinality else: lowerCamelCase : Dict = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : str = embedding_dimension else: lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase : Any = num_parallel_samples # Transformer architecture configuration lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features lowerCamelCase : List[str] = d_model lowerCamelCase : Tuple = encoder_attention_heads lowerCamelCase : Optional[int] = decoder_attention_heads lowerCamelCase : Union[str, Any] = encoder_ffn_dim lowerCamelCase : str = decoder_ffn_dim lowerCamelCase : str = encoder_layers lowerCamelCase : Any = decoder_layers lowerCamelCase : Optional[int] = dropout lowerCamelCase : List[str] = attention_dropout lowerCamelCase : Tuple = activation_dropout lowerCamelCase : Optional[int] = encoder_layerdrop lowerCamelCase : int = decoder_layerdrop lowerCamelCase : Optional[int] = activation_function lowerCamelCase : Optional[Any] = init_std lowerCamelCase : Optional[Any] = use_cache super().__init__(is_encoder_decoder=__a , **__a ) @property def a__ ( self: int )-> int: return 
( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
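# --- Added illustration (not part of the original file) ---
# A minimal sketch of how this config composes the encoder/decoder input width:
# `feature_size` is `input_size * len(lags_sequence)` plus the static, dynamic,
# time, and loc/scale features counted by `_number_of_features`. The concrete
# numbers below are assumptions chosen only for illustration.
config = TimeSeriesTransformerConfig(
    prediction_length=24,
    context_length=48,
    num_time_features=2,
    num_static_categorical_features=1,
    cardinality=[10],  # one categorical feature with 10 categories
)
# embedding_dimension defaults to [min(50, (10 + 1) // 2)] == [5]
# _number_of_features = 5 + 0 (dynamic) + 2 (time) + 0 (static real) + 1 * 2 = 9
# feature_size = 1 * len([1, 2, 3, 4, 5, 6, 7]) + 9 = 16
assert config.embedding_dimension == [5]
assert config.feature_size == 1 * 7 + 9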
42
0
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: List[Any] )-> Optional[int]: lowerCamelCase : Union[str, Any] = """hf-internal-testing/tiny-random-t5""" lowerCamelCase : int = AutoTokenizer.from_pretrained(UpperCamelCase__ ) lowerCamelCase : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ) lowerCamelCase : Tuple = tokenizer("""This is me""" , return_tensors="""pt""" ) lowerCamelCase : Union[str, Any] = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) lowerCamelCase : Dict = model.generate(**UpperCamelCase__ ) lowerCamelCase : Dict = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) lowerCamelCase : str = model_reloaded.generate(**UpperCamelCase__ ) self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ ) ) def a__ ( self: int )-> int: lowerCamelCase : Union[str, Any] = """hf-internal-testing/tiny-random-t5""" lowerCamelCase : Union[str, Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ) lowerCamelCase : Optional[Any] = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase__ ): model.save_pretrained(UpperCamelCase__ ) lowerCamelCase : Optional[Any] = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase__ )
709
"""simple docstring""" from __future__ import annotations __lowerCamelCase :int = 10 def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]: lowerCamelCase : int = 1 lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ ) while placement <= max_digit: # declare and initialize empty buckets lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCamelCase : Any = int((i / placement) % RADIX ) buckets[tmp].append(UpperCamelCase__ ) # put each buckets' contents into list_of_ints lowerCamelCase : Dict = 0 for b in range(UpperCamelCase__ ): for i in buckets[b]: lowerCamelCase : List[str] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
42
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import PIL import torch from torchvision import transforms from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput from diffusers.schedulers import DDIMScheduler from diffusers.utils import randn_tensor __lowerCamelCase :Optional[Any] = transforms.Compose( [ transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5], [0.5]), ] ) def snake_case ( UpperCamelCase__ : Optional[int] ) -> Union[str, Any]: if isinstance(UpperCamelCase__ , torch.Tensor ): return image elif isinstance(UpperCamelCase__ , PIL.Image.Image ): lowerCamelCase : int = [image] lowerCamelCase : int = [trans(img.convert("""RGB""" ) ) for img in image] lowerCamelCase : Dict = torch.stack(UpperCamelCase__ ) return image class A__ ( snake_case__): """simple docstring""" def __init__( self: str , __a: Any , __a: int )-> str: super().__init__() # make sure scheduler can always be converted to DDIM lowerCamelCase : Tuple = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=_A , scheduler=_A ) def a__ ( self: Optional[int] , __a: List[Any] )-> Optional[Any]: if strength < 0 or strength > 1: raise ValueError(f'The value of strength should in [0.0, 1.0] but is {strength}' ) def a__ ( self: Union[str, Any] , __a: str , __a: Union[str, Any] , __a: Optional[Any] )-> str: # get the original timestep using init_timestep lowerCamelCase : Optional[int] = min(int(num_inference_steps * strength ) , _A ) lowerCamelCase : str = max(num_inference_steps - init_timestep , 0 ) lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def a__ ( self: Optional[Any] , __a: str , __a: Tuple , __a: Union[str, Any] , __a: Any , __a: Dict , __a: Union[str, Any]=None )-> Optional[int]: if not isinstance(_A , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(_A )}' ) lowerCamelCase : Union[str, Any] = image.to(device=_A , dtype=_A ) if isinstance(_A , _A ) and len(_A ) != batch_size: raise ValueError( f'You have passed a list of generators of length {len(_A )}, but requested an effective batch' f' size of {batch_size}. Make sure the batch size matches the length of the generators.' ) lowerCamelCase : Dict = init_latents.shape lowerCamelCase : Optional[int] = randn_tensor(_A , generator=_A , device=_A , dtype=_A ) # get latents print("""add noise to latents at timestep""" , _A ) lowerCamelCase : List[Any] = self.scheduler.add_noise(_A , _A , _A ) lowerCamelCase : Optional[int] = init_latents return latents @torch.no_grad() def __call__( self: Union[str, Any] , __a: Tuple = None , __a: int = 0.8 , __a: Dict = 1 , __a: Optional[int] = None , __a: Optional[Any] = 0.0 , __a: Any = 50 , __a: List[str] = None , __a: Optional[int] = "pil" , __a: str = True , )-> str: self.check_inputs(_A ) # 2. Preprocess image lowerCamelCase : int = preprocess(_A ) # 3. set timesteps self.scheduler.set_timesteps(_A , device=self.device ) lowerCamelCase : Tuple = self.get_timesteps(_A , _A , self.device ) lowerCamelCase : int = timesteps[:1].repeat(_A ) # 4. Prepare latent variables lowerCamelCase : Dict = self.prepare_latents(_A , _A , _A , self.unet.dtype , self.device , _A ) lowerCamelCase : List[Any] = latents # 5. Denoising loop for t in self.progress_bar(_A ): # 1. predict noise model_output lowerCamelCase : Optional[Any] = self.unet(_A , _A ).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCamelCase : List[Any] = self.scheduler.step( _A , _A , _A , eta=_A , use_clipped_model_output=_A , generator=_A , ).prev_sample lowerCamelCase : List[Any] = (image / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCamelCase : Optional[int] = self.numpy_to_pil(_A ) if not return_dict: return (image, latent_timestep.item()) return ImagePipelineOutput(images=_A )
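# --- Added usage sketch (not part of the original file) ---
# How this pipeline is meant to be driven: `strength` controls how much noise
# is added to the input image before denoising (0.0 keeps the image, 1.0 is
# pure noise). The checkpoint name, custom-pipeline registry name, and input
# path below are all assumptions for illustration only.
import PIL.Image
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-ema-celebahq-256",                     # assumed checkpoint
    custom_pipeline="ddim_noise_comparative_analysis",  # assumed registry name
)
init_image = PIL.Image.open("face.png")                 # hypothetical input
for strength in (0.2, 0.5, 0.8):
    images, noising_step = pipe(
        init_image, strength=strength, num_inference_steps=50, return_dict=False
    )
    images[0].save(f"variation_{strength}.png")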
710
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Dict = np.asarray(weights[0] ) lowerCamelCase : List[Any] = np.asarray(weights[1] ) lowerCamelCase : List[str] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Tuple = np.asarray(weights[0] ) lowerCamelCase : Any = np.asarray(weights[1] ) lowerCamelCase : List[Any] = np.asarray(weights[2] ) lowerCamelCase : List[str] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]: # layernorm 1 lowerCamelCase : str = weights[0][0][0] lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] ) lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # lsh weights + output lowerCamelCase : List[Any] = weights[0][1] if len(UpperCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) else: set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) # intermediate weighs lowerCamelCase : int = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCamelCase__ ) == 4: lowerCamelCase : Dict = intermediate_weights[2] # layernorm 2 lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] ) lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # intermediate dense lowerCamelCase : 
Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) # intermediate out lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] ) lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]: # reformer model lowerCamelCase : List[Any] = torch_model.reformer # word embeds lowerCamelCase : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , ) if isinstance(weights[3] , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) ) lowerCamelCase : int = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # output layer norm lowerCamelCase : Any = np.asarray(weights[7][0] ) lowerCamelCase : List[str] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # output embeddings lowerCamelCase : List[Any] = np.asarray(weights[9][0] ) lowerCamelCase : Optional[int] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: # Initialise PyTorch model lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ ) with open(UpperCamelCase__ , """rb""" ) as f: lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""] set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCamelCase :Optional[int] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
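# --- Added usage sketch (not part of the original file) ---
# The converter is a CLI; invoked programmatically it looks like the sketch
# below. All three paths are placeholders -- the script only requires that the
# pickle hold the trax weights under its "weights" key and that the JSON match
# the target ReformerConfig.
convert_trax_checkpoint_to_pytorch(
    "./reformer_trax.pkl",     # hypothetical --trax_model_pkl_path
    "./reformer_config.json",  # hypothetical --config_file
    "./pytorch_model.bin",     # hypothetical --pytorch_dump_path
)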
42
0
"""simple docstring""" def snake_case ( ) -> list[list[int]]: return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )] __lowerCamelCase :List[str] = generate_large_matrix() __lowerCamelCase :List[str] = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def snake_case ( UpperCamelCase__ : Tuple ) -> None: assert all(row == sorted(__snake_case , reverse=__snake_case ) for row in grid ) assert all(list(__snake_case ) == sorted(__snake_case , reverse=__snake_case ) for col in zip(*__snake_case ) ) def snake_case ( UpperCamelCase__ : Tuple ) -> int: lowerCamelCase : Tuple = 0 lowerCamelCase : Optional[int] = len(__snake_case ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: lowerCamelCase : Any = (left + right) // 2 lowerCamelCase : Optional[int] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: lowerCamelCase : Optional[Any] = mid + 1 else: lowerCamelCase : Optional[Any] = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(__snake_case ) def snake_case ( UpperCamelCase__ : List[Any] ) -> int: lowerCamelCase : int = 0 lowerCamelCase : Dict = len(grid[0] ) for i in range(len(__snake_case ) ): lowerCamelCase : Optional[int] = find_negative_index(grid[i][:bound] ) total += bound return (len(__snake_case ) * len(grid[0] )) - total def snake_case ( UpperCamelCase__ : List[Any] ) -> int: return len([number for row in grid for number in row if number < 0] ) def snake_case ( UpperCamelCase__ : Union[str, Any] ) -> int: lowerCamelCase : Optional[int] = 0 for row in grid: for i, number in enumerate(__snake_case ): if number < 0: total += len(__snake_case ) - i break return total def snake_case ( ) -> None: from timeit import timeit print("""Running benchmarks""" ) lowerCamelCase : int = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): lowerCamelCase : List[Any] = timeit(F'{func}(grid=grid)' , setup=__snake_case , number=500 ) print(F'{func}() took {time:0.4f} seconds' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
711
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A__ ( nn.Module): """simple docstring""" def __init__( self: Dict )-> Dict: super().__init__() lowerCamelCase : Tuple = nn.Linear(3 , 4 ) lowerCamelCase : Optional[Any] = nn.BatchNormad(4 ) lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 ) def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple: return (args[0] + 1,) + args[1:], kwargs class A__ ( __lowercase): """simple docstring""" def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]: return output + 1 class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Dict = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Union[str, Any] = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : str = ModelForTest() lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Union[str, Any] = test_model(x + 1 ) lowerCamelCase : Optional[int] = test_model(x + 2 ) lowerCamelCase : List[Any] = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[int] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) assert torch.allclose(__a , __a , atol=1e-5 ) def a__ ( self: Any )-> Optional[int]: lowerCamelCase : str = ModelForTest() lowerCamelCase : List[str] = torch.randn(2 , 3 ) lowerCamelCase : int = test_model(__a ) lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple 
= test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : str = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1e-5 ) def a__ ( self: int )-> Dict: lowerCamelCase : List[Any] = ModelForTest() lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : List[str] = test_model(__a ) lowerCamelCase : Any = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a__ ( self: List[str] )-> Union[str, Any]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowerCamelCase : str = torch.randn(2 , 3 ) lowerCamelCase : Dict = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 ) lowerCamelCase : str = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Optional[Any] = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload lowerCamelCase : Any = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : int = torch.randn(2 , 3 ) lowerCamelCase : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Any )-> List[str]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Optional[Any] )-> List[Any]: lowerCamelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Tuple = torch.randn(2 , 3 ) lowerCamelCase : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
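# --- Added illustration (not part of the original file) ---
# A minimal sketch of the hook mechanism the tests above exercise: a ModelHook
# can rewrite a module's outputs (post_forward) without touching the module's
# own code, and removal restores the original forward. The hook class itself
# is a made-up example.
import torch
from torch import nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class ScaleOutputHook(ModelHook):
    # assumption for illustration: doubles whatever the module returns
    def post_forward(self, module, output):
        return output * 2


layer = nn.Linear(3, 3)
x = torch.randn(2, 3)
baseline = layer(x)

add_hook_to_module(layer, ScaleOutputHook())
assert torch.allclose(layer(x), baseline * 2)

remove_hook_from_module(layer)  # restores the original forward
assert torch.allclose(layer(x), baseline)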
42
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Dict = False @skip_mps class A__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase): """simple docstring""" snake_case__ : Dict =StableDiffusionAttendAndExcitePipeline snake_case__ : Union[str, Any] =False snake_case__ : List[Any] =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Optional[int] =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Optional[int] )-> List[Any]: super().setUpClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) @classmethod def a__ ( cls: List[str] )-> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) def a__ ( self: Dict )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=lowerCamelCase_ , ) lowerCamelCase : Dict = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=lowerCamelCase_ , set_alpha_to_one=lowerCamelCase_ , ) torch.manual_seed(0 ) lowerCamelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : List[str] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : List[Any] = CLIPTextModel(lowerCamelCase_ ) lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[Any] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Dict , __a: List[str] , __a: Dict=0 )-> Optional[Any]: if str(lowerCamelCase_ ).startswith("""mps""" ): lowerCamelCase : Optional[Any] = torch.manual_seed(lowerCamelCase_ ) else: lowerCamelCase : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ ) lowerCamelCase : List[Any] = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: 
Optional[int] )-> Union[str, Any]: lowerCamelCase : Optional[Any] = """cpu""" lowerCamelCase : Any = self.get_dummy_components() lowerCamelCase : Tuple = self.pipeline_class(**lowerCamelCase_ ) pipe.to(lowerCamelCase_ ) pipe.set_progress_bar_config(disable=lowerCamelCase_ ) lowerCamelCase : List[str] = self.get_dummy_inputs(lowerCamelCase_ ) lowerCamelCase : Tuple = pipe(**lowerCamelCase_ ).images lowerCamelCase : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[int] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Any = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(lowerCamelCase_ , 1e-3 ) def a__ ( self: List[str] )-> str: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Optional[Any]: self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> Any: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: List[str] )-> Dict: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: str )-> Union[str, Any]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Optional[int]: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: int )-> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: List[Any] )-> List[Any]: super().setUpClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) @classmethod def a__ ( cls: int )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(lowerCamelCase_ ) def a__ ( self: Tuple )-> int: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Any: lowerCamelCase : Dict = torch.manual_seed(51 ) lowerCamelCase : List[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=lowerCamelCase_ , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : List[Any] = """a painting of an elephant with glasses""" lowerCamelCase : Optional[Any] = [5, 7] lowerCamelCase : Optional[Any] = pipe( prompt=lowerCamelCase_ , token_indices=lowerCamelCase_ , guidance_scale=7.5 , generator=lowerCamelCase_ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Dict = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
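# --- Added usage sketch (not part of the original file) ---
# Outside the tests, Attend-and-Excite is driven the same way as the slow test
# above: `token_indices` picks the prompt tokens whose cross-attention the
# sampler boosts so each named subject actually appears in the image. The step
# count and output path are assumptions.
import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
).to("cuda")

prompt = "a cat and a frog"
# indices 2 and 5 point at "cat" and "frog" in the tokenized prompt
image = pipe(
    prompt=prompt, token_indices=[2, 5], guidance_scale=7.5, num_inference_steps=50
).images[0]
image.save("cat_and_frog.png")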
712
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCamelCase :Optional[Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Union[str, Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" def snake_case ( UpperCamelCase__ : list , UpperCamelCase__ : int , UpperCamelCase__ : int = 0 , UpperCamelCase__ : int = 0 ) -> Union[str, Any]: lowerCamelCase : int = right or len(A__ ) - 1 if left > right: return -1 elif list_data[left] == key: return left elif list_data[right] == key: return right else: return search(A__ , A__ , left + 1 , right - 1 ) if __name__ == "__main__": import doctest doctest.testmod()
713
"""simple docstring""" import inspect import unittest from transformers import ConvNextConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: Optional[Any] , __a: Union[str, Any] , __a: Optional[Any]=13 , __a: Optional[Any]=32 , __a: Dict=3 , __a: int=4 , __a: Dict=[10, 20, 30, 40] , __a: int=[2, 2, 3, 2] , __a: Any=True , __a: List[Any]=True , __a: Any=37 , __a: Optional[int]="gelu" , __a: List[str]=10 , __a: Optional[int]=0.02 , __a: Dict=["stage2", "stage3", "stage4"] , __a: List[str]=[2, 3, 4] , __a: List[str]=None , )-> Union[str, Any]: lowerCamelCase : Optional[int] = parent lowerCamelCase : Optional[int] = batch_size lowerCamelCase : Any = image_size lowerCamelCase : Tuple = num_channels lowerCamelCase : str = num_stages lowerCamelCase : List[str] = hidden_sizes lowerCamelCase : str = depths lowerCamelCase : Dict = is_training lowerCamelCase : Optional[Any] = use_labels lowerCamelCase : List[str] = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : List[str] = num_labels lowerCamelCase : Union[str, Any] = initializer_range lowerCamelCase : List[Any] = out_features lowerCamelCase : Optional[Any] = out_indices lowerCamelCase : int = scope def a__ ( self: str )-> Optional[Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : Dict = None if self.use_labels: lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCamelCase : Any = self.get_config() return config, pixel_values, labels def a__ ( self: Dict )-> Union[str, Any]: return ConvNextConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def a__ ( self: Optional[Any] , __a: List[Any] , __a: Any , __a: int )-> List[Any]: lowerCamelCase : Optional[int] = ConvNextModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a__ ( self: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> Optional[int]: lowerCamelCase : str = ConvNextForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def a__ ( self: List[Any] , __a: Any , __a: Optional[int] , __a: Tuple )-> List[str]: lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() 
lowerCamelCase : int = model(__a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None lowerCamelCase : Tuple = None lowerCamelCase : List[str] = ConvNextBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : List[Any] = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = config_and_inputs lowerCamelCase : int = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : int =( ( ConvNextModel, ConvNextForImageClassification, ConvNextBackbone, ) if is_torch_available() else () ) snake_case__ : str =( {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification} if is_torch_available() else {} ) snake_case__ : Union[str, Any] =True snake_case__ : Optional[int] =False snake_case__ : Tuple =False snake_case__ : Union[str, Any] =False snake_case__ : Tuple =False def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = ConvNextModelTester(self ) lowerCamelCase : List[Any] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 ) def a__ ( self: Optional[int] )-> Dict: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: Optional[int] )-> Optional[Any]: return @unittest.skip(reason="""ConvNext does not use inputs_embeds""" ) def a__ ( self: int )-> Dict: pass @unittest.skip(reason="""ConvNext does not support input and output embeddings""" ) def a__ ( self: Dict )-> Optional[Any]: pass @unittest.skip(reason="""ConvNext does not use feedforward chunking""" ) def a__ ( self: int )-> List[Any]: pass def a__ ( self: Union[str, Any] )-> int: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__a ) lowerCamelCase : str = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Optional[Any] = [*signature.parameters.keys()] lowerCamelCase : List[str] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: Optional[int] )-> str: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: str )-> int: lowerCamelCase : int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: int )-> Optional[int]: def check_hidden_states_output(__a: Tuple , __a: int , __a: Tuple ): lowerCamelCase : str = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Tuple = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCamelCase : Optional[int] = self.model_tester.num_stages self.assertEqual(len(__a ) , expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowerCamelCase , lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Tuple = True check_hidden_states_output(__a , __a , __a ) def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def a__ ( self: Optional[Any] )-> Tuple: for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : str = ConvNextModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Optional[int]: lowerCamelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Dict )-> Union[str, Any]: return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None @slow def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(__a ) lowerCamelCase : Dict = self.default_image_processor lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : Optional[Any] = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Union[str, Any] = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Tuple = torch.tensor([-0.02_60, -0.47_39, 0.19_11] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) @require_torch class A__ ( unittest.TestCase , __lowercase): """simple docstring""" snake_case__ : Union[str, Any] =(ConvNextBackbone,) if is_torch_available() else () snake_case__ : Optional[Any] =ConvNextConfig snake_case__ : Optional[Any] =False def a__ ( self: List[str] )-> int: lowerCamelCase : Dict = ConvNextModelTester(self )
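# --- Added usage sketch (not part of the original file) ---
# The slow integration test above in condensed form: classify one image with
# the public facebook/convnext-tiny-224 checkpoint and the same COCO fixture.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])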
42
0
"""simple docstring""" import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerImageProcessor class A__ ( _UpperCamelCase): """simple docstring""" def a__ ( self: Tuple )-> List[Any]: lowerCamelCase : int = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__a , """hidden_sizes""" ) ) self.parent.assertTrue(hasattr(__a , """num_attention_heads""" ) ) self.parent.assertTrue(hasattr(__a , """num_encoder_blocks""" ) ) class A__ : """simple docstring""" def __init__( self: List[str] , __a: List[Any] , __a: int=13 , __a: Dict=64 , __a: Union[str, Any]=3 , __a: int=4 , __a: Union[str, Any]=[2, 2, 2, 2] , __a: List[Any]=[8, 4, 2, 1] , __a: Tuple=[16, 32, 64, 128] , __a: Any=[1, 4, 8, 16] , __a: Optional[Any]=[1, 2, 4, 8] , __a: str=True , __a: Any=True , __a: Union[str, Any]="gelu" , __a: List[str]=0.1 , __a: Tuple=0.1 , __a: List[str]=0.02 , __a: Optional[int]=3 , __a: Union[str, Any]=None , )-> List[Any]: lowerCamelCase : Optional[int] = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Tuple = image_size lowerCamelCase : int = num_channels lowerCamelCase : Optional[int] = num_encoder_blocks lowerCamelCase : Optional[Any] = sr_ratios lowerCamelCase : Any = depths lowerCamelCase : Dict = hidden_sizes lowerCamelCase : Union[str, Any] = downsampling_rates lowerCamelCase : str = num_attention_heads lowerCamelCase : List[Any] = is_training lowerCamelCase : Dict = use_labels lowerCamelCase : str = hidden_act lowerCamelCase : Optional[Any] = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : Tuple = initializer_range lowerCamelCase : int = num_labels lowerCamelCase : Union[str, Any] = scope def a__ ( self: Dict )-> Tuple: lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : List[Any] = None if self.use_labels: lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCamelCase : List[Any] = self.get_config() return config, pixel_values, labels def a__ ( self: List[str] )-> Any: return SegformerConfig( image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def a__ ( self: str , __a: Union[str, Any] , __a: Tuple , __a: Any )-> Optional[int]: lowerCamelCase : Tuple = SegformerModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) lowerCamelCase : Optional[Any] = self.image_size // 
(self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def a__ ( self: Union[str, Any] , __a: List[Any] , __a: Any , __a: Union[str, Any] )-> int: lowerCamelCase : List[str] = self.num_labels lowerCamelCase : List[str] = SegformerForSemanticSegmentation(__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) lowerCamelCase : Optional[Any] = model(__a , labels=__a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss , 0.0 ) def a__ ( self: Union[str, Any] , __a: Any , __a: List[Any] , __a: Tuple )-> Tuple: lowerCamelCase : List[str] = 1 lowerCamelCase : List[Any] = SegformerForSemanticSegmentation(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__a ) lowerCamelCase : int = model(__a , labels=__a ) self.parent.assertGreater(result.loss , 0.0 ) def a__ ( self: Any )-> List[Any]: lowerCamelCase : int = self.prepare_config_and_inputs() lowerCamelCase : Any = config_and_inputs lowerCamelCase : Optional[int] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class A__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase): """simple docstring""" snake_case__ : int =( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) snake_case__ : Tuple =( { '''feature-extraction''': SegformerModel, '''image-classification''': SegformerForImageClassification, '''image-segmentation''': SegformerForSemanticSegmentation, } if is_torch_available() else {} ) snake_case__ : str =True snake_case__ : int =False snake_case__ : Dict =False snake_case__ : int =False def a__ ( self: str )-> Optional[Any]: lowerCamelCase : Dict = SegformerModelTester(self ) lowerCamelCase : Union[str, Any] = SegformerConfigTester(self , config_class=__a ) def a__ ( self: str )-> Any: self.config_tester.run_common_tests() def a__ ( self: Union[str, Any] )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: str )-> str: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*__a ) def a__ ( self: Tuple )-> int: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*__a ) @unittest.skip("""SegFormer does not use inputs_embeds""" ) def a__ ( self: Any )-> Dict: pass @unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" ) def a__ ( self: Optional[Any] )-> Tuple: pass def a__ ( self: List[str] )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : Any = model_class(__a ) lowerCamelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Tuple = [*signature.parameters.keys()] lowerCamelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : List[str] = 
self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = True for model_class in self.all_model_classes: lowerCamelCase : Any = True lowerCamelCase : int = False lowerCamelCase : str = True lowerCamelCase : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Optional[int] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : Dict = outputs.attentions lowerCamelCase : Optional[int] = sum(self.model_tester.depths ) self.assertEqual(len(__a ) , __a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowerCamelCase : Union[str, Any] = True lowerCamelCase : Union[str, Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : List[Any] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : Optional[int] = outputs.attentions self.assertEqual(len(__a ) , __a ) # verify the first attentions (first block, first layer) lowerCamelCase : Optional[int] = (self.model_tester.image_size // 4) ** 2 lowerCamelCase : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) # verify the last attentions (last block, last layer) lowerCamelCase : Tuple = (self.model_tester.image_size // 32) ** 2 lowerCamelCase : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , ) lowerCamelCase : Optional[Any] = len(__a ) # Check attention is always last and order is fine lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = True lowerCamelCase : List[str] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : Optional[Any] = model(**self._prepare_for_class(__a , __a ) ) self.assertEqual(out_len + 1 , len(__a ) ) lowerCamelCase : Any = outputs.attentions self.assertEqual(len(__a ) , __a ) # verify the first attentions (first block, first layer) lowerCamelCase : Dict = (self.model_tester.image_size // 4) ** 2 lowerCamelCase : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , ) def a__ ( self: List[Any] )-> Dict: def check_hidden_states_output(__a: Any , __a: Union[str, Any] , __a: str ): lowerCamelCase : List[str] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase : str = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : Dict = outputs.hidden_states lowerCamelCase : int = self.model_tester.num_encoder_blocks self.assertEqual(len(__a ) , __a ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCamelCase : List[Any] = True check_hidden_states_output(__a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : str = True check_hidden_states_output(__a , __a , __a ) 
def a__ ( self: Union[str, Any] )-> List[Any]: if not self.model_tester.is_training: return lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[Any] = True for model_class in self.all_model_classes: if model_class in get_values(__a ): continue lowerCamelCase : Optional[int] = model_class(__a ) model.to(__a ) model.train() lowerCamelCase : List[Any] = self._prepare_for_class(__a , __a , return_labels=__a ) lowerCamelCase : Any = model(**__a ).loss loss.backward() @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: Union[str, Any] )-> List[Any]: pass @slow def a__ ( self: Any )-> int: for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[str] = SegformerModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def snake_case ( ) -> Any: lowerCamelCase : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch class A__ ( unittest.TestCase): """simple docstring""" @slow def a__ ( self: Dict )-> Union[str, Any]: # only resize + normalize lowerCamelCase : str = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a ) lowerCamelCase : str = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __a ) lowerCamelCase : str = prepare_img() lowerCamelCase : List[Any] = image_processor(images=__a , return_tensors="""pt""" ) lowerCamelCase : Optional[Any] = encoded_inputs.pixel_values.to(__a ) with torch.no_grad(): lowerCamelCase : Optional[Any] = model(__a ) lowerCamelCase : List[Any] = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : int = torch.tensor( [ [[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]], [[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]], [[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]], ] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Optional[Any] )-> List[Any]: # only resize + normalize lowerCamelCase : List[Any] = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , align=__a , do_random_crop=__a ) lowerCamelCase : Dict = SegformerForSemanticSegmentation.from_pretrained( """nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__a ) lowerCamelCase : Union[str, Any] = prepare_img() lowerCamelCase : List[str] = image_processor(images=__a , return_tensors="""pt""" ) lowerCamelCase : Any = encoded_inputs.pixel_values.to(__a ) with torch.no_grad(): lowerCamelCase : Any = model(__a ) lowerCamelCase : List[Any] = torch.Size((1, model.config.num_labels, 128, 128) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : Dict = torch.tensor( [ [[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]], [[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]], [[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]], ] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __a , atol=1e-1 ) ) @slow def a__ ( self: Tuple )-> Dict: # only resize + normalize lowerCamelCase : int = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=__a , 
align=__a , do_random_crop=__a ) lowerCamelCase : Dict = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to( __a ) lowerCamelCase : Tuple = prepare_img() lowerCamelCase : Dict = image_processor(images=__a , return_tensors="""pt""" ) lowerCamelCase : int = encoded_inputs.pixel_values.to(__a ) with torch.no_grad(): lowerCamelCase : List[str] = model(__a ) lowerCamelCase : Optional[Any] = outputs.logits.detach().cpu() lowerCamelCase : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__a , target_sizes=[(500, 300)] ) lowerCamelCase : Union[str, Any] = torch.Size((500, 300) ) self.assertEqual(segmentation[0].shape , __a ) lowerCamelCase : Any = image_processor.post_process_semantic_segmentation(outputs=__a ) lowerCamelCase : List[str] = torch.Size((128, 128) ) self.assertEqual(segmentation[0].shape , __a )
714
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Optional[int] = logging.get_logger(__name__) __lowerCamelCase :List[str] = { 'google/realm-cc-news-pretrained-embedder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-encoder': ( 'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-scorer': ( 'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json' ), 'google/realm-cc-news-pretrained-openqa': ( 'https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json' ), 'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json', 'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json', 'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json', 'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json', # See all REALM models at https://huggingface.co/models?filter=realm } class A__ ( __lowercase): """simple docstring""" snake_case__ : Optional[Any] ='''realm''' def __init__( self: Union[str, Any] , __a: List[Any]=30_522 , __a: List[Any]=768 , __a: List[Any]=128 , __a: Union[str, Any]=12 , __a: Union[str, Any]=12 , __a: Optional[Any]=8 , __a: Dict=3_072 , __a: List[Any]="gelu_new" , __a: List[Any]=0.1 , __a: Tuple=0.1 , __a: Optional[Any]=512 , __a: Optional[int]=2 , __a: str=0.02 , __a: int=1e-1_2 , __a: Optional[Any]=256 , __a: Any=10 , __a: Dict=1e-3 , __a: Optional[Any]=5 , __a: Dict=320 , __a: Tuple=13_353_718 , __a: List[Any]=5_000 , __a: Dict=1 , __a: int=0 , __a: Dict=2 , **__a: List[str] , )-> Any: super().__init__(pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , **__a ) # Common config lowerCamelCase : Optional[Any] = vocab_size lowerCamelCase : str = max_position_embeddings lowerCamelCase : Dict = hidden_size lowerCamelCase : Dict = retriever_proj_size lowerCamelCase : Optional[Any] = num_hidden_layers lowerCamelCase : List[str] = num_attention_heads lowerCamelCase : Tuple = num_candidates lowerCamelCase : int = intermediate_size lowerCamelCase : Dict = hidden_act lowerCamelCase : List[str] = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Optional[int] = initializer_range lowerCamelCase : Dict = type_vocab_size lowerCamelCase : Optional[Any] = layer_norm_eps # Reader config lowerCamelCase : List[str] = span_hidden_size lowerCamelCase : Dict = max_span_width lowerCamelCase : Optional[Any] = reader_layer_norm_eps lowerCamelCase : Optional[int] = reader_beam_size lowerCamelCase : List[Any] = reader_seq_len # Retrieval config lowerCamelCase : int = num_block_records lowerCamelCase : Dict = searcher_beam_size
42
0
"""simple docstring""" import unittest from transformers import is_flax_available from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow if is_flax_available(): import optax from flax.training.common_utils import onehot from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration from transformers.models.ta.modeling_flax_ta import shift_tokens_right @require_torch @require_sentencepiece @require_tokenizers @require_flax class A__ ( unittest.TestCase): """simple docstring""" @slow def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : Union[str, Any] = FlaxMTaForConditionalGeneration.from_pretrained("""google/mt5-small""" ) lowerCamelCase : int = AutoTokenizer.from_pretrained("""google/mt5-small""" ) lowerCamelCase : Any = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids lowerCamelCase : List[str] = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids lowerCamelCase : Any = shift_tokens_right(lowercase_ , model.config.pad_token_id , model.config.decoder_start_token_id ) lowerCamelCase : int = model(lowercase_ , decoder_input_ids=lowercase_ ).logits lowerCamelCase : List[Any] = optax.softmax_cross_entropy(lowercase_ , onehot(lowercase_ , logits.shape[-1] ) ).mean() lowerCamelCase : Any = -(labels.shape[-1] * loss.item()) lowerCamelCase : str = -84.91_27 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
715
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Tuple = logging.get_logger(__name__) __lowerCamelCase :Any = { 'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json', # See all GLPN models at https://huggingface.co/models?filter=glpn } class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple ='''glpn''' def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict: super().__init__(**__a ) lowerCamelCase : Dict = num_channels lowerCamelCase : Any = num_encoder_blocks lowerCamelCase : Dict = depths lowerCamelCase : List[str] = sr_ratios lowerCamelCase : Dict = hidden_sizes lowerCamelCase : Tuple = patch_sizes lowerCamelCase : Optional[int] = strides lowerCamelCase : Optional[Any] = mlp_ratios lowerCamelCase : Union[str, Any] = num_attention_heads lowerCamelCase : List[str] = hidden_act lowerCamelCase : Any = hidden_dropout_prob lowerCamelCase : Optional[int] = attention_probs_dropout_prob lowerCamelCase : List[Any] = initializer_range lowerCamelCase : Dict = drop_path_rate lowerCamelCase : Any = layer_norm_eps lowerCamelCase : Optional[Any] = decoder_hidden_size lowerCamelCase : Tuple = max_depth lowerCamelCase : Optional[Any] = head_in_index
42
0
"""simple docstring""" import qiskit def snake_case ( UpperCamelCase__ : int = 2 ) -> qiskit.result.counts.Counts: lowerCamelCase : str = qubits # Using Aer's simulator lowerCamelCase : Optional[int] = qiskit.Aer.get_backend("""aer_simulator""" ) # Creating a Quantum Circuit acting on the q register lowerCamelCase : Dict = qiskit.QuantumCircuit(snake_case__ , snake_case__ ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , snake_case__ ): # Adding CX (CNOT) gate circuit.cx(i - 1 , snake_case__ ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(snake_case__ ) ) , list(range(snake_case__ ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator lowerCamelCase : List[Any] = qiskit.execute(snake_case__ , snake_case__ , shots=1000 ) return job.result().get_counts(snake_case__ ) if __name__ == "__main__": print(f"""Total count for various states are: {quantum_entanglement(3)}""")
716
"""simple docstring""" from __future__ import annotations import math def snake_case ( UpperCamelCase__ : float , UpperCamelCase__ : int ) -> float: lowerCamelCase : Dict = u for i in range(1 , UpperCamelCase__ ): lowerCamelCase : List[str] = temp * (u - i) return temp def snake_case ( ) -> None: lowerCamelCase : List[Any] = int(input("""enter the numbers of values: """ ) ) lowerCamelCase : list[list[float]] = [] for _ in range(UpperCamelCase__ ): y.append([] ) for i in range(UpperCamelCase__ ): for j in range(UpperCamelCase__ ): y[i].append(UpperCamelCase__ ) lowerCamelCase : Union[str, Any] = 0 print("""enter the values of parameters in a list: """ ) lowerCamelCase : Any = list(map(UpperCamelCase__ , input().split() ) ) print("""enter the values of corresponding parameters: """ ) for i in range(UpperCamelCase__ ): lowerCamelCase : int = float(input() ) lowerCamelCase : Dict = int(input("""enter the value to interpolate: """ ) ) lowerCamelCase : List[Any] = (value - x[0]) / (x[1] - x[0]) # for calculating forward difference table for i in range(1 , UpperCamelCase__ ): for j in range(n - i ): lowerCamelCase : str = y[j + 1][i - 1] - y[j][i - 1] lowerCamelCase : Any = y[0][0] for i in range(1 , UpperCamelCase__ ): summ += (ucal(UpperCamelCase__ , UpperCamelCase__ ) * y[0][i]) / math.factorial(UpperCamelCase__ ) print(F'the value at {value} is {summ}' ) if __name__ == "__main__": main()
42
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available __lowerCamelCase :Dict = { 'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :List[str] = [ 'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST', 'ErnieForCausalLM', 'ErnieForMaskedLM', 'ErnieForMultipleChoice', 'ErnieForNextSentencePrediction', 'ErnieForPreTraining', 'ErnieForQuestionAnswering', 'ErnieForSequenceClassification', 'ErnieForTokenClassification', 'ErnieModel', 'ErniePreTrainedModel', ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys __lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
717
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available __lowerCamelCase :str = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = ['GPTSw3Tokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_swa import GPTSwaTokenizer else: import sys __lowerCamelCase :Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
"""simple docstring""" from scipy.stats import spearmanr import datasets __lowerCamelCase :Optional[Any] = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ __lowerCamelCase :Any = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ __lowerCamelCase :int = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class A__ ( datasets.Metric): """simple docstring""" def a__ ( self: Any )-> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] , ) def a__ ( self: Optional[int] , __a: Any , __a: int , __a: Any=False )-> List[Any]: lowerCamelCase : List[str] = spearmanr(snake_case__ , snake_case__ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
718
"""simple docstring""" import argparse import json from collections import OrderedDict from functools import partial from pathlib import Path import timm import torch from huggingface_hub import hf_hub_download from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase :Dict = logging.get_logger() def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : LevitConfig , UpperCamelCase__ : Path , UpperCamelCase__ : bool = True ) -> Dict: print(F'Converting {name}...' ) with torch.no_grad(): if hidden_sizes == 128: if name[-1] == "S": lowerCamelCase : Optional[Any] = timm.create_model("""levit_128s""" , pretrained=UpperCamelCase__ ) else: lowerCamelCase : Dict = timm.create_model("""levit_128""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 192: lowerCamelCase : Tuple = timm.create_model("""levit_192""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 256: lowerCamelCase : Optional[int] = timm.create_model("""levit_256""" , pretrained=UpperCamelCase__ ) if hidden_sizes == 384: lowerCamelCase : Dict = timm.create_model("""levit_384""" , pretrained=UpperCamelCase__ ) from_model.eval() lowerCamelCase : Optional[Any] = LevitForImageClassificationWithTeacher(UpperCamelCase__ ).eval() lowerCamelCase : Tuple = OrderedDict() lowerCamelCase : Optional[Any] = from_model.state_dict() lowerCamelCase : str = list(from_model.state_dict().keys() ) lowerCamelCase : List[Any] = list(our_model.state_dict().keys() ) print(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : str = weights[og_keys[i]] our_model.load_state_dict(UpperCamelCase__ ) lowerCamelCase : int = torch.randn((2, 3, 224, 224) ) lowerCamelCase : Any = from_model(UpperCamelCase__ ) lowerCamelCase : List[Any] = our_model(UpperCamelCase__ ).logits assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ ), "The model logits don't match the original one." 
lowerCamelCase : Dict = name print(UpperCamelCase__ ) if push_to_hub: our_model.save_pretrained(save_directory / checkpoint_name ) lowerCamelCase : Optional[int] = LevitImageProcessor() image_processor.save_pretrained(save_directory / checkpoint_name ) print(F'Pushed {checkpoint_name}' ) def snake_case ( UpperCamelCase__ : Path , UpperCamelCase__ : str = None , UpperCamelCase__ : bool = True ) -> Optional[int]: lowerCamelCase : Optional[Any] = """imagenet-1k-id2label.json""" lowerCamelCase : List[Any] = 1000 lowerCamelCase : Dict = (1, num_labels) lowerCamelCase : List[Any] = """huggingface/label-files""" lowerCamelCase : Optional[int] = num_labels lowerCamelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowerCamelCase : Any = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} lowerCamelCase : List[Any] = idalabel lowerCamelCase : str = {v: k for k, v in idalabel.items()} lowerCamelCase : Tuple = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) lowerCamelCase : Optional[int] = { """levit-128S""": 128, """levit-128""": 128, """levit-192""": 192, """levit-256""": 256, """levit-384""": 384, } lowerCamelCase : List[Any] = { """levit-128S""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-128""": ImageNetPreTrainedConfig( hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ), """levit-192""": ImageNetPreTrainedConfig( hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-256""": ImageNetPreTrainedConfig( hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ), """levit-384""": ImageNetPreTrainedConfig( hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ), } if model_name: convert_weight_and_push( names_to_hidden_sizes[model_name] , UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(names_to_hidden_sizes[model_name] , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": __lowerCamelCase :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,', ) parser.add_argument( '--pytorch_dump_folder_path', default='levit-dump-folder/', type=Path, required=False, help='Path to the output PyTorch model directory.', ) parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') parser.add_argument( '--no-push_to_hub', dest='push_to_hub', action='store_false', help='Do not push model and image processor to the hub', ) __lowerCamelCase :List[Any] = parser.parse_args() __lowerCamelCase :Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
42
0
"""simple docstring""" # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def snake_case ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=0 ) -> Optional[Any]: if name is None: lowerCamelCase : Any = None else: lowerCamelCase : Dict = """.""" * max(0 , spaces - 2 ) + """# {:""" + str(50 - spaces ) + """s}""" lowerCamelCase : Dict = fmt.format(__a ) # Print and recurse (if needed). 
if isinstance(__a , __a ): if msg is not None: print(__a ) for k in val.keys(): recursive_print(__a , val[k] , spaces + 2 ) elif isinstance(__a , torch.Tensor ): print(__a , """:""" , val.size() ) else: print(__a , """:""" , __a ) def snake_case ( UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> Optional[int]: lowerCamelCase : str = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] lowerCamelCase : Tuple = (num_heads, hidden_size, num_splits) + input_shape[1:] lowerCamelCase : Tuple = param.view(*__a ) lowerCamelCase : Dict = param.transpose(0 , 2 ) lowerCamelCase : Tuple = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] lowerCamelCase : int = (num_heads, num_splits, hidden_size) + input_shape[1:] lowerCamelCase : str = param.view(*__a ) lowerCamelCase : List[Any] = param.transpose(0 , 1 ).contiguous() lowerCamelCase : Tuple = param.view(*__a ) return param def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : List[str] ) -> Any: lowerCamelCase : Dict = {} # old versions did not store training args lowerCamelCase : List[str] = input_state_dict.get("""args""" , __a ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) lowerCamelCase : Any = ds_args.padded_vocab_size lowerCamelCase : List[Any] = ds_args.max_position_embeddings lowerCamelCase : Any = ds_args.hidden_size lowerCamelCase : Tuple = ds_args.num_layers lowerCamelCase : Any = ds_args.num_attention_heads lowerCamelCase : Optional[int] = ds_args.ffn_hidden_size # pprint(config) # The number of heads. lowerCamelCase : Union[str, Any] = config.n_head # The hidden_size per head. lowerCamelCase : List[Any] = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): lowerCamelCase : str = input_state_dict["""checkpoint_version"""] else: lowerCamelCase : str = 0.0 # The model. lowerCamelCase : Optional[int] = input_state_dict["""model"""] # The language model. lowerCamelCase : Union[str, Any] = model["""language_model"""] # The embeddings. lowerCamelCase : str = lm["""embedding"""] # The word embeddings. lowerCamelCase : List[str] = embeddings["""word_embeddings"""]["""weight"""] # Truncate the embedding table to vocab_size rows. lowerCamelCase : Optional[int] = word_embeddings[: config.vocab_size, :] lowerCamelCase : Union[str, Any] = word_embeddings # The position embeddings. lowerCamelCase : List[Any] = embeddings["""position_embeddings"""]["""weight"""] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] lowerCamelCase : List[Any] = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' ) # Store the position embeddings. lowerCamelCase : Optional[int] = pos_embeddings # The transformer. lowerCamelCase : int = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""] # The regex to extract layer names. lowerCamelCase : Any = re.compile(R"""layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)""" ) # The simple map of names for "automated" rules. 
lowerCamelCase : Optional[Any] = { """attention.dense""": """.attn.c_proj.""", """self_attention.dense""": """.attn.c_proj.""", """mlp.dense_h_to_4h""": """.mlp.c_fc.""", """mlp.dense_4h_to_h""": """.mlp.c_proj.""", } # Extract the layers. for key, val in transformer.items(): # Match the name. lowerCamelCase : Optional[Any] = layer_re.match(__a ) # Stop if that's not a layer if m is None: break # The index of the layer. lowerCamelCase : Optional[int] = int(m.group(1 ) ) # The name of the operation. lowerCamelCase : int = m.group(2 ) # Is it a weight or a bias? lowerCamelCase : Union[str, Any] = m.group(3 ) # The name of the layer. lowerCamelCase : List[Any] = F'transformer.h.{layer_idx}' # For layernorm(s), simply store the layer norm. if op_name.endswith("""layernorm""" ): lowerCamelCase : int = """ln_1""" if op_name.startswith("""input""" ) else """ln_2""" lowerCamelCase : str = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. lowerCamelCase : Dict = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , __a , __a ) lowerCamelCase : str = causal_mask # Insert a "dummy" tensor for masked_bias. lowerCamelCase : Dict = torch.tensor(-1E4 , dtype=torch.floataa ) lowerCamelCase : int = masked_bias lowerCamelCase : Tuple = fix_query_key_value_ordering(__a , __a , 3 , __a , __a ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. lowerCamelCase : List[str] = out_val.transpose(0 , 1 ).contiguous() # Store. lowerCamelCase : Union[str, Any] = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": lowerCamelCase : int = fix_query_key_value_ordering(__a , __a , 3 , __a , __a ) # Store. No change of shape. lowerCamelCase : Optional[int] = out_val # Transpose the weights. elif weight_or_bias == "weight": lowerCamelCase : Optional[Any] = megatron_to_transformers[op_name] lowerCamelCase : List[str] = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": lowerCamelCase : str = megatron_to_transformers[op_name] lowerCamelCase : Union[str, Any] = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. lowerCamelCase : List[Any] = transformer["""final_layernorm.weight"""] lowerCamelCase : Optional[Any] = transformer["""final_layernorm.bias"""] # For LM head, transformers' wants the matrix to weight embeddings. lowerCamelCase : Tuple = word_embeddings # It should be done! return output_state_dict def snake_case ( ) -> List[str]: lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument("""--print-checkpoint-structure""" , action="""store_true""" ) parser.add_argument( """path_to_checkpoint""" , type=__a , help="""Path to the checkpoint file (.zip archive or direct .pt file)""" , ) parser.add_argument( """--config_file""" , default="""""" , type=__a , help="""An optional config json file describing the pre-trained model.""" , ) lowerCamelCase : Dict = parser.parse_args() # Extract the basename. lowerCamelCase : Dict = os.path.dirname(args.path_to_checkpoint ) # Load the model. 
# the .zip is very optional, let's keep it for backward compatibility print(F'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' ) if args.path_to_checkpoint.endswith(""".zip""" ): with zipfile.ZipFile(args.path_to_checkpoint , """r""" ) as checkpoint: with checkpoint.open("""release/mp_rank_00/model_optim_rng.pt""" ) as pytorch_dict: lowerCamelCase : List[Any] = torch.load(__a , map_location="""cpu""" ) else: lowerCamelCase : str = torch.load(args.path_to_checkpoint , map_location="""cpu""" ) lowerCamelCase : int = input_state_dict.get("""args""" , __a ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: lowerCamelCase : Tuple = """gelu_fast""" elif ds_args.openai_gelu: lowerCamelCase : Union[str, Any] = """gelu_new""" else: lowerCamelCase : Tuple = """gelu""" else: # in the very early days this used to be "gelu_new" lowerCamelCase : Tuple = """gelu_new""" # Spell out all parameters in case the defaults change. lowerCamelCase : List[Any] = GPTaConfig( vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__a , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type="""cls_index""" , summary_use_proj=__a , summary_activation=__a , summary_proj_to_labels=__a , summary_first_dropout=0.1 , scale_attn_weights=__a , use_cache=__a , bos_token_id=50256 , eos_token_id=50256 , ) else: lowerCamelCase : Any = GPTaConfig.from_json_file(args.config_file ) lowerCamelCase : List[Any] = ["""GPT2LMHeadModel"""] # Convert. print("""Converting""" ) lowerCamelCase : int = convert_megatron_checkpoint(__a , __a , __a ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(__a , __a ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: lowerCamelCase : Optional[Any] = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": lowerCamelCase : int = """gpt2""" elif tokenizer_type == "PretrainedFromHF": lowerCamelCase : Any = ds_args.tokenizer_name_or_path else: raise ValueError(F'Unrecognized tokenizer_type {tokenizer_type}' ) else: lowerCamelCase : List[Any] = """gpt2""" lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained(__a ) lowerCamelCase : Dict = type(__a ).__name__ lowerCamelCase : Dict = tokenizer_class # Store the config to file. print("""Saving config""" ) config.save_pretrained(__a ) # Save tokenizer based on args print(F'Adding {tokenizer_class} tokenizer files' ) tokenizer.save_pretrained(__a ) # Store the state_dict to file. lowerCamelCase : Optional[int] = os.path.join(__a , """pytorch_model.bin""" ) print(F'Saving checkpoint to \"{output_checkpoint_file}\"' ) torch.save(__a , __a ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
719
"""simple docstring""" import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class A__ ( __lowercase): """simple docstring""" snake_case__ : Tuple =(KDPMaDiscreteScheduler,) snake_case__ : Tuple =10 def a__ ( self: List[Any] , **__a: Optional[int] )-> Union[str, Any]: lowerCamelCase : int = { """num_train_timesteps""": 1_100, """beta_start""": 0.00_01, """beta_end""": 0.02, """beta_schedule""": """linear""", } config.update(**__a ) return config def a__ ( self: Union[str, Any] )-> Any: for timesteps in [10, 50, 100, 1_000]: self.check_over_configs(num_train_timesteps=__a ) def a__ ( self: str )-> int: for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ): self.check_over_configs(beta_start=__a , beta_end=__a ) def a__ ( self: int )-> Union[str, Any]: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__a ) def a__ ( self: List[Any] )-> List[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__a ) def a__ ( self: Union[str, Any] )-> int: lowerCamelCase : List[str] = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config(prediction_type="""v_prediction""" ) lowerCamelCase : List[str] = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : Dict = self.dummy_model() lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : List[Any] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Optional[Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : Optional[Any] = output.prev_sample lowerCamelCase : List[str] = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2 assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2 assert abs(result_mean.item() - 0.00_02 ) < 1e-3 def a__ ( self: Any )-> Any: if torch_device == "mps": return lowerCamelCase : Dict = self.scheduler_classes[0] lowerCamelCase : Dict = self.get_scheduler_config() lowerCamelCase : int = scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps ) lowerCamelCase : List[Any] = self.dummy_model() lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCamelCase : Optional[int] = sample.to(__a ) for i, t in enumerate(scheduler.timesteps ): lowerCamelCase : Dict = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[Any] = model(__a , __a ) lowerCamelCase : Tuple = scheduler.step(__a , __a , __a ) lowerCamelCase : str = output.prev_sample lowerCamelCase : Tuple = torch.sum(torch.abs(__a ) ) lowerCamelCase : Tuple = torch.mean(torch.abs(__a ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 def a__ ( self: Optional[Any] )-> List[Any]: if torch_device == "mps": return lowerCamelCase : Any = self.scheduler_classes[0] lowerCamelCase : Union[str, Any] = self.get_scheduler_config() lowerCamelCase : Optional[Any] = 
scheduler_class(**__a ) scheduler.set_timesteps(self.num_inference_steps , device=__a ) lowerCamelCase : Union[str, Any] = self.dummy_model() lowerCamelCase : List[str] = self.dummy_sample_deter.to(__a ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCamelCase : Union[str, Any] = scheduler.scale_model_input(__a , __a ) lowerCamelCase : Optional[int] = model(__a , __a ) lowerCamelCase : int = scheduler.step(__a , __a , __a ) lowerCamelCase : int = output.prev_sample lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(__a ) ) lowerCamelCase : int = torch.mean(torch.abs(__a ) ) if str(__a ).startswith("""cpu""" ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.41_25 ) < 1e-2 assert abs(result_mean.item() - 0.02_66 ) < 1e-3
42
0
"""simple docstring""" import unittest from transformers import XLMConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : """simple docstring""" def __init__( self: int , __a: Any , __a: List[Any]=13 , __a: Optional[Any]=7 , __a: Optional[int]=True , __a: Any=True , __a: Optional[Any]=True , __a: Any=True , __a: int=True , __a: Optional[int]=False , __a: Dict=False , __a: List[Any]=False , __a: str=2 , __a: List[str]=99 , __a: int=0 , __a: int=32 , __a: List[Any]=5 , __a: Tuple=4 , __a: Dict=0.1 , __a: Tuple=0.1 , __a: str=512 , __a: Optional[int]=2 , __a: Tuple=0.02 , __a: List[str]=2 , __a: Optional[int]=4 , __a: Union[str, Any]="last" , __a: Union[str, Any]=True , __a: Optional[Any]=None , __a: int=0 , )-> Dict: lowerCamelCase : List[str] = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : Union[str, Any] = seq_length lowerCamelCase : Dict = is_training lowerCamelCase : Dict = use_input_lengths lowerCamelCase : List[str] = use_token_type_ids lowerCamelCase : Dict = use_labels lowerCamelCase : int = gelu_activation lowerCamelCase : List[str] = sinusoidal_embeddings lowerCamelCase : Tuple = causal lowerCamelCase : Tuple = asm lowerCamelCase : List[Any] = n_langs lowerCamelCase : List[Any] = vocab_size lowerCamelCase : List[Any] = n_special lowerCamelCase : Dict = hidden_size lowerCamelCase : str = num_hidden_layers lowerCamelCase : Optional[Any] = num_attention_heads lowerCamelCase : Dict = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : str = max_position_embeddings lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Optional[int] = initializer_range lowerCamelCase : Optional[Any] = num_labels lowerCamelCase : str = num_choices lowerCamelCase : Tuple = summary_type lowerCamelCase : Tuple = use_proj lowerCamelCase : List[Any] = scope lowerCamelCase : int = bos_token_id def a__ ( self: Optional[Any] )-> str: lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Union[str, Any] = None if self.use_input_lengths: lowerCamelCase : int = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase : Optional[int] = None if self.use_token_type_ids: lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase : Optional[int] = None lowerCamelCase : str = None lowerCamelCase : Optional[Any] = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , 2 ).float() lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) 
lowerCamelCase : Optional[int] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def a__ ( self: Tuple )-> List[Any]: return XLMConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , ) def a__ ( self: int , __a: int , __a: Dict , __a: Any , __a: str , __a: Dict , __a: Optional[int] , __a: Optional[int] , __a: List[Any] , __a: Optional[Any] , )-> Any: lowerCamelCase : Tuple = XLMModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : List[str] = model(snake_case_ , lengths=snake_case_ , langs=snake_case_ ) lowerCamelCase : Optional[Any] = model(snake_case_ , langs=snake_case_ ) lowerCamelCase : str = model(snake_case_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self: List[Any] , __a: List[Any] , __a: List[Any] , __a: Any , __a: Optional[int] , __a: int , __a: Tuple , __a: Dict , __a: Union[str, Any] , __a: str , )-> Optional[Any]: lowerCamelCase : Optional[Any] = XLMWithLMHeadModel(snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : int = model(snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self: Any , __a: List[Any] , __a: List[str] , __a: Optional[int] , __a: Dict , __a: Union[str, Any] , __a: Dict , __a: int , __a: Optional[int] , __a: Optional[Any] , )-> Tuple: lowerCamelCase : List[Any] = XLMForQuestionAnsweringSimple(snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : Optional[int] = model(snake_case_ ) lowerCamelCase : Tuple = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ ) lowerCamelCase : Union[str, Any] = outputs self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def a__ ( self: Optional[Any] , __a: Optional[Any] , __a: List[Any] , __a: int , __a: Union[str, Any] , __a: Dict , __a: Dict , __a: str , __a: int , __a: Optional[int] , )-> Dict: lowerCamelCase : Union[str, Any] = XLMForQuestionAnswering(snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : int = model(snake_case_ ) lowerCamelCase : List[Any] = model( snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , p_mask=snake_case_ , ) lowerCamelCase : Optional[Any] = model( snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , cls_index=snake_case_ , is_impossible=snake_case_ , ) (lowerCamelCase ) : Dict = result_with_labels.to_tuple() lowerCamelCase : Optional[Any] = model(snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ ) (lowerCamelCase ) : List[str] = result_with_labels.to_tuple() 
self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def a__ ( self: Union[str, Any] , __a: List[Any] , __a: str , __a: List[Any] , __a: List[str] , __a: Dict , __a: Dict , __a: List[Any] , __a: Any , __a: str , )-> Dict: lowerCamelCase : Union[str, Any] = XLMForSequenceClassification(snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : Optional[int] = model(snake_case_ ) lowerCamelCase : Union[str, Any] = model(snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self: Dict , __a: Union[str, Any] , __a: List[Any] , __a: Union[str, Any] , __a: str , __a: Union[str, Any] , __a: str , __a: Tuple , __a: str , __a: int , )-> Dict: lowerCamelCase : List[str] = self.num_labels lowerCamelCase : Union[str, Any] = XLMForTokenClassification(snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self: List[Any] , __a: Optional[int] , __a: str , __a: Dict , __a: Optional[int] , __a: Optional[Any] , __a: str , __a: Optional[Any] , __a: Any , __a: Dict , )-> List[str]: lowerCamelCase : List[str] = self.num_choices lowerCamelCase : Union[str, Any] = XLMForMultipleChoice(config=snake_case_ ) model.to(snake_case_ ) model.eval() lowerCamelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase : List[Any] = model( snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def a__ ( self: Optional[Any] )-> Tuple: lowerCamelCase : Dict = self.prepare_config_and_inputs() ( lowerCamelCase ) : Any = config_and_inputs lowerCamelCase : str = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class A__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase): """simple docstring""" snake_case__ : Dict =( ( XLMModel, XLMWithLMHeadModel, XLMForQuestionAnswering, XLMForSequenceClassification, XLMForQuestionAnsweringSimple, XLMForTokenClassification, XLMForMultipleChoice, ) if is_torch_available() else () ) snake_case__ : Union[str, Any] =( (XLMWithLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Check other models whether language generation is also applicable snake_case__ : Optional[int] =( { """feature-extraction""": XLMModel, """fill-mask""": XLMWithLMHeadModel, """question-answering""": XLMForQuestionAnsweringSimple, """text-classification""": XLMForSequenceClassification, 
"""text-generation""": XLMWithLMHeadModel, """token-classification""": XLMForTokenClassification, """zero-shot""": XLMForSequenceClassification, } if is_torch_available() else {} ) def a__ ( self: str , __a: Optional[int] , __a: Dict , __a: Union[str, Any] , __a: List[Any] , __a: int )-> List[str]: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def a__ ( self: Any , __a: Optional[Any] , __a: str , __a: int=False )-> Tuple: lowerCamelCase : Optional[Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ ) if return_labels: if model_class.__name__ == "XLMForQuestionAnswering": lowerCamelCase : Any = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case_ ) lowerCamelCase : Union[str, Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case_ ) return inputs_dict def a__ ( self: Optional[Any] )-> Any: lowerCamelCase : int = XLMModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=snake_case_ , emb_dim=37 ) def a__ ( self: List[Any] )-> Tuple: self.config_tester.run_common_tests() def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_model(*snake_case_ ) def a__ ( self: Optional[Any] )-> int: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_lm_head(*snake_case_ ) def a__ ( self: Optional[Any] )-> Optional[Any]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_simple_qa(*snake_case_ ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_qa(*snake_case_ ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_sequence_classif(*snake_case_ ) def a__ ( self: Tuple )-> Union[str, Any]: lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_token_classif(*snake_case_ ) def a__ ( self: str )-> List[str]: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_xlm_for_multiple_choice(*snake_case_ ) def a__ ( self: Any , __a: Optional[int] , __a: Optional[Any] , __a: Optional[int] , __a: int , __a: Dict , __a: Optional[int]=False , __a: Union[str, Any]=1 )-> Any: self.assertIsInstance(snake_case_ , snake_case_ ) self.assertListEqual( [isinstance(snake_case_ , snake_case_ ) for iter_attentions in attentions] , [True] * len(snake_case_ ) ) self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_attentions in enumerate(snake_case_ ): # adds PAD dummy token lowerCamelCase : Optional[Any] = min_length + idx + 1 lowerCamelCase : Optional[Any] = min_length + idx + 1 lowerCamelCase : Any = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * 
len(snake_case_ ) ) def a__ ( self: Dict , __a: Any , __a: List[str] , __a: Any , __a: Dict , __a: int , __a: Dict=False , __a: Optional[int]=1 )-> Union[str, Any]: self.assertIsInstance(snake_case_ , snake_case_ ) self.assertListEqual( [isinstance(snake_case_ , snake_case_ ) for iter_hidden_states in hidden_states] , [True] * len(snake_case_ ) , ) self.assertEqual(len(snake_case_ ) , (max_length - min_length) * num_beam_groups ) for idx, iter_hidden_states in enumerate(snake_case_ ): # adds PAD dummy token lowerCamelCase : Dict = min_length + idx + 1 lowerCamelCase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(snake_case_ ) , ) pass @slow def a__ ( self: Dict )-> str: for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : Any = XLMModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) @require_torch class A__ ( unittest.TestCase): """simple docstring""" @slow def a__ ( self: str )-> Any: lowerCamelCase : Optional[Any] = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" ) model.to(snake_case_ ) lowerCamelCase : List[Any] = torch.tensor([[14, 447]] , dtype=torch.long , device=snake_case_ ) # the president lowerCamelCase : List[Any] = [ 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, 14, 447, ] # the president the president the president the president the president the president the president the president the president the president # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference lowerCamelCase : Any = model.generate(snake_case_ , do_sample=snake_case_ ) self.assertListEqual(output_ids[0].cpu().numpy().tolist() , snake_case_ )
720
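For reference, the shape arithmetic that both `_check_*_for_generate` helpers above depend on can be stated in a few lines. A minimal sketch (the helper name and the toy sizes are assumptions for illustration, not part of the test suite):

def expected_attention_shape(batch_size, num_beam_groups, num_heads, min_length, idx):
    # hypothetical helper: after `idx` generation steps the decoded prefix
    # holds min_length + idx + 1 tokens, and without a cache the
    # self-attention map is square over that prefix
    seq_len = min_length + idx + 1
    return (batch_size * num_beam_groups, num_heads, seq_len, seq_len)

assert expected_attention_shape(2, 1, 12, 5, 0) == (2, 12, 6, 6)
assert expected_attention_shape(2, 2, 12, 5, 3) == (4, 12, 9, 9)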
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DiffusionPipeline, EulerDiscreteScheduler, StableDiffusionXLImgaImgPipeline, UNetaDConditionModel, ) from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =StableDiffusionXLImgaImgPipeline snake_case__ : Any =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ : Optional[int] =PipelineTesterMixin.required_optional_params - {'''latents'''} snake_case__ : Dict =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ : Tuple =IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case__ : List[str] =IMAGE_TO_IMAGE_IMAGE_PARAMS def a__ ( self: List[str] )-> int: torch.manual_seed(0 ) lowerCamelCase : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__a , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , ) lowerCamelCase : Any = EulerDiscreteScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , ) torch.manual_seed(0 ) lowerCamelCase : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : Optional[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=32 , ) lowerCamelCase : Dict = CLIPTextModel(__a ) lowerCamelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : Dict = CLIPTextModelWithProjection(__a ) lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__a ) lowerCamelCase : str = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """text_encoder_2""": text_encoder_a, """tokenizer_2""": tokenizer_a, # "safety_checker": None, # "feature_extractor": None, } return components def a__ ( self: Any , __a: str , __a: Tuple=0 )-> Union[str, Any]: lowerCamelCase : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a ) lowerCamelCase : Any = image / 2 + 0.5 if str(__a ).startswith("""mps""" ): lowerCamelCase : Dict = torch.manual_seed(__a ) else: lowerCamelCase : Tuple = torch.Generator(device=__a 
).manual_seed(__a ) lowerCamelCase : Tuple = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 5.0, """output_type""": """numpy""", """strength""": 0.75, } return inputs def a__ ( self: Dict )-> Optional[Any]: lowerCamelCase : Any = """cpu""" # ensure determinism for the device-dependent torch.Generator lowerCamelCase : Union[str, Any] = self.get_dummy_components() lowerCamelCase : Optional[int] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : int = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[Any] = self.get_dummy_inputs(__a ) lowerCamelCase : Optional[int] = sd_pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase : Any = np.array([0.46_56, 0.48_40, 0.44_39, 0.66_98, 0.55_74, 0.45_24, 0.57_99, 0.59_43, 0.51_65] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def a__ ( self: Optional[int] )-> Union[str, Any]: super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 ) def a__ ( self: Optional[Any] )-> str: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def a__ ( self: List[str] )-> Optional[Any]: pass def a__ ( self: List[Any] )-> Union[str, Any]: lowerCamelCase : Tuple = self.get_dummy_components() lowerCamelCase : Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__a ) lowerCamelCase : str = sd_pipe.to(__a ) lowerCamelCase : Any = sd_pipe.to(__a ) sd_pipe.set_progress_bar_config(disable=__a ) # forward without prompt embeds lowerCamelCase : Dict = self.get_dummy_inputs(__a ) lowerCamelCase : Any = 3 * ["""this is a negative prompt"""] lowerCamelCase : Optional[int] = negative_prompt lowerCamelCase : Tuple = 3 * [inputs["""prompt"""]] lowerCamelCase : List[Any] = sd_pipe(**__a ) lowerCamelCase : Optional[int] = output.images[0, -3:, -3:, -1] # forward with prompt embeds lowerCamelCase : Tuple = self.get_dummy_inputs(__a ) lowerCamelCase : List[Any] = 3 * ["""this is a negative prompt"""] lowerCamelCase : Tuple = 3 * [inputs.pop("""prompt""" )] ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Union[str, Any] = sd_pipe.encode_prompt(__a , negative_prompt=__a ) lowerCamelCase : int = sd_pipe( **__a , prompt_embeds=__a , negative_prompt_embeds=__a , pooled_prompt_embeds=__a , negative_pooled_prompt_embeds=__a , ) lowerCamelCase : Union[str, Any] = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1e-4 @slow @require_torch_gpu class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Dict )-> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: Union[str, Any] , __a: Any , __a: Any="cpu" , __a: str=torch.floataa , __a: Any=0 )-> Optional[Any]: lowerCamelCase : Optional[Any] = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : List[Any] = np.random.RandomState(__a ).standard_normal((1, 4, 64, 64) ) lowerCamelCase : List[str] = torch.from_numpy(__a ).to(device=__a , dtype=__a ) lowerCamelCase : int = { """prompt""": """a photograph of an astronaut riding a horse""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def a__ ( self: Optional[int] )-> List[str]: lowerCamelCase : Tuple = 
DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" ) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Optional[int] = self.get_inputs(__a ) lowerCamelCase : Optional[Any] = pipe(**__a ).images lowerCamelCase : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase : List[str] = np.array([0.4_94_93, 0.4_78_96, 0.4_07_98, 0.5_42_14, 0.5_32_12, 0.4_82_02, 0.4_76_56, 0.4_63_29, 0.4_85_06] ) assert np.abs(image_slice - expected_slice ).max() < 7e-3
42
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :Tuple = logging.get_logger(__name__) __lowerCamelCase :List[str] = { 'facebook/wav2vec2-base-960h': 'https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json', # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2 } class A__ ( lowercase__): """simple docstring""" snake_case__ : Any ='''wav2vec2''' def __init__( self: List[Any] , __a: Union[str, Any]=32 , __a: int=768 , __a: Optional[Any]=12 , __a: Union[str, Any]=12 , __a: Union[str, Any]=3_072 , __a: Tuple="gelu" , __a: List[Any]=0.1 , __a: int=0.1 , __a: Any=0.1 , __a: Optional[Any]=0.0 , __a: Tuple=0.0 , __a: Optional[int]=0.1 , __a: Tuple=0.1 , __a: List[str]=0.02 , __a: Optional[Any]=1e-5 , __a: List[Any]="group" , __a: Dict="gelu" , __a: Union[str, Any]=(512, 512, 512, 512, 512, 512, 512) , __a: Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , __a: Any=(10, 3, 3, 3, 3, 2, 2) , __a: Dict=False , __a: List[str]=128 , __a: Union[str, Any]=16 , __a: List[str]=False , __a: int=True , __a: Any=0.05 , __a: Union[str, Any]=10 , __a: List[str]=2 , __a: List[Any]=0.0 , __a: List[str]=10 , __a: Union[str, Any]=0 , __a: List[str]=320 , __a: List[str]=2 , __a: List[str]=0.1 , __a: Tuple=100 , __a: Optional[Any]=256 , __a: List[Any]=256 , __a: str=0.1 , __a: Dict="sum" , __a: List[str]=False , __a: Optional[int]=False , __a: Union[str, Any]=256 , __a: Dict=(512, 512, 512, 512, 1_500) , __a: Optional[int]=(5, 3, 3, 1, 1) , __a: Optional[int]=(1, 2, 3, 1, 1) , __a: List[str]=512 , __a: Optional[Any]=0 , __a: List[str]=1 , __a: List[Any]=2 , __a: Dict=False , __a: str=3 , __a: int=2 , __a: Optional[Any]=3 , __a: str=None , __a: Tuple=None , **__a: Optional[Any] , )-> Optional[int]: super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ ) lowerCamelCase : Union[str, Any] = hidden_size lowerCamelCase : Optional[int] = feat_extract_norm lowerCamelCase : Optional[int] = feat_extract_activation lowerCamelCase : Dict = list(UpperCAmelCase__ ) lowerCamelCase : str = list(UpperCAmelCase__ ) lowerCamelCase : str = list(UpperCAmelCase__ ) lowerCamelCase : List[Any] = conv_bias lowerCamelCase : List[Any] = num_conv_pos_embeddings lowerCamelCase : Dict = num_conv_pos_embedding_groups lowerCamelCase : Optional[int] = len(self.conv_dim ) lowerCamelCase : Any = num_hidden_layers lowerCamelCase : int = intermediate_size lowerCamelCase : List[str] = hidden_act lowerCamelCase : Dict = num_attention_heads lowerCamelCase : Tuple = hidden_dropout lowerCamelCase : Optional[int] = attention_dropout lowerCamelCase : Tuple = activation_dropout lowerCamelCase : str = feat_proj_dropout lowerCamelCase : int = final_dropout lowerCamelCase : Optional[Any] = layerdrop lowerCamelCase : Union[str, Any] = layer_norm_eps lowerCamelCase : List[str] = initializer_range lowerCamelCase : str = vocab_size lowerCamelCase : Optional[Any] = do_stable_layer_norm lowerCamelCase : List[Any] = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCamelCase : List[Any] = apply_spec_augment lowerCamelCase : Optional[Any] = mask_time_prob lowerCamelCase : Union[str, Any] = mask_time_length lowerCamelCase : Optional[int] = mask_time_min_masks lowerCamelCase : Union[str, Any] = mask_feature_prob lowerCamelCase : Any = mask_feature_length lowerCamelCase : int = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCamelCase : Optional[int] = num_codevectors_per_group lowerCamelCase : int = num_codevector_groups lowerCamelCase : Optional[Any] = contrastive_logits_temperature lowerCamelCase : str = feat_quantizer_dropout lowerCamelCase : List[str] = num_negatives lowerCamelCase : int = codevector_dim lowerCamelCase : Any = proj_codevector_dim lowerCamelCase : List[Any] = diversity_loss_weight # ctc loss lowerCamelCase : Tuple = ctc_loss_reduction lowerCamelCase : Optional[Any] = ctc_zero_infinity # adapter lowerCamelCase : Dict = add_adapter lowerCamelCase : str = adapter_kernel_size lowerCamelCase : Optional[Any] = adapter_stride lowerCamelCase : Union[str, Any] = num_adapter_layers lowerCamelCase : Union[str, Any] = output_hidden_size or hidden_size lowerCamelCase : Dict = adapter_attn_dim # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCamelCase : Union[str, Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCamelCase : List[str] = list(UpperCAmelCase__ ) lowerCamelCase : List[str] = list(UpperCAmelCase__ ) lowerCamelCase : Dict = list(UpperCAmelCase__ ) lowerCamelCase : Optional[int] = xvector_output_dim @property def a__ ( self: Any )-> Any: return functools.reduce(operator.mul , self.conv_stride , 1 )
721
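The `conv_stride` default above fixes the model's audio downsampling rate through the stride-product property at the end of the config. A minimal sketch of that relationship (the 20 ms figure assumes 16 kHz input, the usual Wav2Vec2 setting):

import functools
import operator

# the product of the conv strides is the overall input-to-output length ratio
conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default above
ratio = functools.reduce(operator.mul, conv_stride, 1)
assert ratio == 320  # at 16 kHz this yields one feature frame per 20 ms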
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Optional[int] , __a: Tuple , __a: Optional[int] )-> List[str]: return None class A__ : """simple docstring""" def a__ ( self: Optional[int] , __a: Tuple , __a: str , __a: str , __a: str )-> Tuple: return None class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =[ # (model_name, model_kwargs) ('''bert-base-cased''', {}), ('''gpt2''', {'''use_cache''': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def a__ ( self: Optional[Any] )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """tf""" , 12 , **__a ) @require_torch @slow def a__ ( self: str )-> int: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__a , """pt""" , 12 , **__a ) @require_torch @slow def a__ ( self: Union[str, Any] )-> Dict: from transformers import BertModel lowerCamelCase : int = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(__a ) ) vocab_file.flush() lowerCamelCase : Dict = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowerCamelCase : List[str] = BertModel(BertConfig(vocab_size=len(__a ) ) ) model.save_pretrained(__a ) self._test_export(__a , """pt""" , 12 , __a ) @require_tf @slow def a__ ( self: Optional[Any] )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Optional[int] = self._test_export(__a , """tf""" , 12 , **__a ) lowerCamelCase : Tuple = quantize(Path(__a ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def a__ ( self: Any )-> Optional[int]: for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowerCamelCase : Any = self._test_export(__a , """pt""" , 12 , **__a ) lowerCamelCase : Dict = quantize(__a ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__a ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def a__ ( self: List[Any] , __a: Optional[Any] , __a: List[Any] , __a: Union[str, Any] , __a: Optional[Any]=None , **__a: Optional[int] )-> Any: try: # Compute path with TemporaryDirectory() as tempdir: lowerCamelCase : Optional[Any] = Path(__a ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__a , __a , __a , __a , __a , **__a ) return path except Exception as e: self.fail(__a ) @require_torch @require_tokenizers @slow def a__ ( self: Tuple )-> Dict: from transformers import BertModel lowerCamelCase : int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : List[Any] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a 
, """pt""" ) @require_tf @require_tokenizers @slow def a__ ( self: Optional[Any] )-> List[Any]: from transformers import TFBertModel lowerCamelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) lowerCamelCase : str = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(__a , __a , """tf""" ) def a__ ( self: List[str] , __a: str , __a: Optional[Any] , __a: str )-> List[Any]: lowerCamelCase : List[str] = FeatureExtractionPipeline(__a , __a ) lowerCamelCase : List[str] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Tuple = infer_shapes(__a , __a ) # Assert all variables are present self.assertEqual(len(__a ) , len(__a ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , __a ) self.assertSequenceEqual(variable_names[3:] , __a ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} ) def a__ ( self: List[Any] )-> int: lowerCamelCase : List[str] = ["""input_ids""", """attention_mask""", """token_type_ids"""] lowerCamelCase : str = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncContiguousArgs() , __a , __a ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__a ) , 3 ) # Should have exactly the same input names self.assertEqual(set(__a ) , set(__a ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__a , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowerCamelCase , lowerCamelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs() , __a , __a ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__a ) , 1 ) self.assertEqual(len(__a ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] , """input_ids""" ) def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Optional[int] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
42
0
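The last assertion in the ONNX test class above pins down the behavior of `generate_identified_filename`. A hedged re-implementation consistent with that expectation (a sketch, not necessarily the actual `transformers.convert_graph_to_onnx` code):

from pathlib import Path

def generate_identified_filename(path: Path, identifier: str) -> Path:
    # insert the identifier between the stem and the suffix
    return path.parent.joinpath(path.stem + identifier).with_suffix(path.suffix)

assert (
    generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test").as_posix()
    == "/home/something/my_fake_model-test.onnx"
)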
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :List[Any] = logging.get_logger(__name__) __lowerCamelCase :Dict = { '''caidas/swin2sr-classicalsr-x2-64''': ( '''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json''' ), } class A__ ( __UpperCAmelCase): """simple docstring""" snake_case__ : Optional[Any] ="swin2sr" snake_case__ : str ={ "hidden_size": "embed_dim", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self: Optional[int] , __a: Dict=64 , __a: str=1 , __a: Optional[int]=3 , __a: List[Any]=180 , __a: Optional[int]=[6, 6, 6, 6, 6, 6] , __a: Union[str, Any]=[6, 6, 6, 6, 6, 6] , __a: Tuple=8 , __a: List[Any]=2.0 , __a: Dict=True , __a: Dict=0.0 , __a: int=0.0 , __a: Optional[int]=0.1 , __a: Any="gelu" , __a: int=False , __a: Tuple=0.02 , __a: List[str]=1e-5 , __a: List[Any]=2 , __a: str=1.0 , __a: List[Any]="1conv" , __a: str="pixelshuffle" , **__a: List[str] , )-> str: super().__init__(**_lowerCamelCase ) lowerCamelCase : int = image_size lowerCamelCase : List[str] = patch_size lowerCamelCase : Dict = num_channels lowerCamelCase : Optional[int] = embed_dim lowerCamelCase : List[Any] = depths lowerCamelCase : Optional[Any] = len(_lowerCamelCase ) lowerCamelCase : Any = num_heads lowerCamelCase : Any = window_size lowerCamelCase : int = mlp_ratio lowerCamelCase : Optional[int] = qkv_bias lowerCamelCase : int = hidden_dropout_prob lowerCamelCase : Union[str, Any] = attention_probs_dropout_prob lowerCamelCase : Optional[int] = drop_path_rate lowerCamelCase : Dict = hidden_act lowerCamelCase : int = use_absolute_embeddings lowerCamelCase : Any = layer_norm_eps lowerCamelCase : List[Any] = initializer_range lowerCamelCase : Optional[int] = upscale lowerCamelCase : Tuple = img_range lowerCamelCase : Optional[int] = resi_connection lowerCamelCase : Optional[Any] = upsampler
700
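The `attribute_map` in the Swin2SR config above lets callers read `hidden_size` while the canonical attribute is `embed_dim`. A minimal sketch of how such aliasing can work (toy class, not the real `PretrainedConfig` machinery):

class AliasedConfig:
    # alias -> canonical attribute name, mirroring the map above
    attribute_map = {"hidden_size": "embed_dim"}

    def __init__(self, embed_dim: int):
        self.embed_dim = embed_dim

    def __getattr__(self, name):
        # only reached when normal lookup fails, so no recursion on embed_dim
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)

assert AliasedConfig(embed_dim=180).hidden_size == 180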
"""simple docstring""" import unittest from knapsack import greedy_knapsack as kp class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Optional[int] )-> Union[str, Any]: lowerCamelCase : Tuple = [10, 20, 30, 40, 50, 60] lowerCamelCase : Union[str, Any] = [2, 4, 6, 8, 10, 12] lowerCamelCase : Union[str, Any] = 100 self.assertEqual(kp.calc_profit(__a , __a , __a ) , 210 ) def a__ ( self: str )-> str: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: str )-> List[Any]: self.assertRaisesRegex(__a , """Weight can not be negative.""" ) def a__ ( self: Any )-> Dict: self.assertRaisesRegex(__a , """Profit can not be negative.""" ) def a__ ( self: Optional[Any] )-> List[Any]: self.assertRaisesRegex(__a , """max_weight must greater than zero.""" ) def a__ ( self: Optional[Any] )-> Tuple: self.assertRaisesRegex( __a , """The length of profit and weight must be same.""" ) if __name__ == "__main__": unittest.main()
42
0
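The expected profit of 210 in the first test above is consistent with a fractional greedy strategy: all six items fit (total weight 42 <= 100), so the full profit sum is taken. A hedged re-implementation of `calc_profit` matching the tests (assumed behavior; the real `greedy_knapsack` module may differ in details):

def calc_profit(profit, weight, max_weight):
    if max_weight <= 0:
        # message matches the regex the tests assert on (typo included)
        raise ValueError("max_weight must greater than zero.")
    # greedy by profit/weight ratio, taking a fraction of the last item if needed
    items = sorted(zip(profit, weight), key=lambda pw: pw[0] / pw[1], reverse=True)
    total, capacity = 0.0, max_weight
    for p, w in items:
        take = min(w, capacity)
        total += p * take / w
        capacity -= take
        if capacity == 0:
            break
    return total

assert calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100) == 210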
"""simple docstring""" import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class A__ ( unittest.TestCase): """simple docstring""" def __init__( self: int , __a: Any , __a: int=13 , __a: Any=7 , __a: Union[str, Any]=True , __a: List[Any]=True , __a: List[str]=True , __a: List[Any]=True , __a: Tuple=99 , __a: str=32 , __a: Union[str, Any]=5 , __a: List[str]=4 , __a: List[Any]=37 , __a: Optional[Any]="gelu" , __a: Dict=0.1 , __a: List[str]=0.1 , __a: Tuple=512 , __a: Union[str, Any]=16 , __a: int=2 , __a: List[str]=0.02 , __a: List[str]=4 , )-> Optional[int]: lowerCamelCase : Optional[Any] = parent lowerCamelCase : Dict = batch_size lowerCamelCase : Dict = seq_length lowerCamelCase : Any = is_training lowerCamelCase : List[str] = use_attention_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : Tuple = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Tuple = intermediate_size lowerCamelCase : Any = hidden_act lowerCamelCase : Optional[Any] = hidden_dropout_prob lowerCamelCase : int = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : List[str] = type_vocab_size lowerCamelCase : Union[str, Any] = type_sequence_label_size lowerCamelCase : Any = initializer_range lowerCamelCase : List[Any] = num_choices def a__ ( self: str )-> int: lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : List[Any] = None if self.use_attention_mask: lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase : Union[str, Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def a__ ( self: List[str] )-> List[Any]: lowerCamelCase : Optional[int] = self.prepare_config_and_inputs() lowerCamelCase : List[str] = config_and_inputs lowerCamelCase : Tuple = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def a__ ( self: Union[str, Any] )-> Optional[Any]: lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase : Tuple = config_and_inputs lowerCamelCase : Optional[Any] = True lowerCamelCase : Dict = 
floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowerCamelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class A__ ( lowerCamelCase__ , unittest.TestCase): """simple docstring""" snake_case__ : str =True snake_case__ : Dict =( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, ) if is_flax_available() else () ) def a__ ( self: Dict )-> Dict: lowerCamelCase : Optional[int] = FlaxBertModelTester(self ) @slow def a__ ( self: List[Any] )-> str: lowerCamelCase : Tuple = FlaxBertModel.from_pretrained("""bert-base-cased""" ) lowerCamelCase : int = model(np.ones((1, 1) ) ) self.assertIsNotNone(__lowerCamelCase )
701
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) __lowerCamelCase :List[str] = { 'configuration_owlvit': [ 'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OwlViTConfig', 'OwlViTOnnxConfig', 'OwlViTTextConfig', 'OwlViTVisionConfig', ], 'processing_owlvit': ['OwlViTProcessor'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[int] = ['OwlViTFeatureExtractor'] __lowerCamelCase :List[str] = ['OwlViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Optional[Any] = [ 'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OwlViTModel', 'OwlViTPreTrainedModel', 'OwlViTTextModel', 'OwlViTVisionModel', 'OwlViTForObjectDetection', ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
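The OwlViT `__init__` above defers heavy imports through `_LazyModule`. A hypothetical minimal version of the same idea using PEP 562's module-level `__getattr__` (the real `transformers._LazyModule` is considerably more involved):

import importlib

_import_structure = {"processing_owlvit": ["OwlViTProcessor"]}

def __getattr__(name):
    # resolve a public symbol to its submodule on first access
    for submodule, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")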
"""simple docstring""" import gc import unittest from parameterized import parameterized from diffusers import FlaxUNetaDConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: Tuple , __a: Tuple , __a: str )-> int: return f'gaussian_noise_s={seed}_shape={"_".join([str(_lowercase ) for s in shape] )}.npy' def a__ ( self: Dict )-> Any: # clean up the VRAM after each test super().tearDown() gc.collect() def a__ ( self: Dict , __a: Any=0 , __a: int=(4, 4, 64, 64) , __a: Dict=False )-> Optional[Any]: lowerCamelCase : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa lowerCamelCase : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(_lowercase , _lowercase ) ) , dtype=_lowercase ) return image def a__ ( self: str , __a: List[str]=False , __a: Optional[int]="CompVis/stable-diffusion-v1-4" )-> str: lowerCamelCase : str = jnp.bfloataa if fpaa else jnp.floataa lowerCamelCase : int = """bf16""" if fpaa else None lowerCamelCase , lowerCamelCase : int = FlaxUNetaDConditionModel.from_pretrained( _lowercase , subfolder="""unet""" , dtype=_lowercase , revision=_lowercase ) return model, params def a__ ( self: Tuple , __a: Any=0 , __a: Dict=(4, 77, 768) , __a: List[str]=False )-> Any: lowerCamelCase : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa lowerCamelCase : Optional[Any] = jnp.array(load_hf_numpy(self.get_file_format(_lowercase , _lowercase ) ) , dtype=_lowercase ) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.23_23, -0.13_04, 0.08_13, -0.30_93, -0.09_19, -0.15_71, -0.11_25, -0.58_06]], [17, 0.55, [-0.08_31, -0.24_43, 0.09_01, -0.09_19, 0.33_96, 0.01_03, -0.37_43, 0.07_01]], [8, 0.89, [-0.48_63, 0.08_59, 0.08_75, -0.16_58, 0.91_99, -0.01_14, 0.48_39, 0.46_39]], [3, 1_000, [-0.56_49, 0.24_02, -0.55_18, 0.12_48, 1.13_28, -0.24_43, -0.03_25, -1.00_78]], # fmt: on ] ) def a__ ( self: List[Any] , __a: str , __a: Tuple , __a: Optional[Any] )-> int: lowerCamelCase , lowerCamelCase : Dict = self.get_unet_model(model_id="""CompVis/stable-diffusion-v1-4""" , fpaa=_lowercase ) lowerCamelCase : Dict = self.get_latents(_lowercase , fpaa=_lowercase ) lowerCamelCase : str = self.get_encoder_hidden_states(_lowercase , fpaa=_lowercase ) lowerCamelCase : Union[str, Any] = model.apply( {"""params""": params} , _lowercase , jnp.array(_lowercase , dtype=jnp.intaa ) , encoder_hidden_states=_lowercase , ).sample assert sample.shape == latents.shape lowerCamelCase : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowerCamelCase : Union[str, Any] = jnp.array(_lowercase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(_lowercase , _lowercase , atol=1e-2 ) @parameterized.expand( [ # fmt: off [83, 4, [0.15_14, 0.08_07, 0.16_24, 0.10_16, -0.18_96, 0.02_63, 0.06_77, 0.23_10]], [17, 0.55, [0.11_64, -0.02_16, 0.01_70, 0.15_89, -0.31_20, 0.10_05, -0.05_81, -0.14_58]], [8, 0.89, [-0.17_58, -0.01_69, 0.10_04, -0.14_11, 0.13_12, 0.11_03, -0.19_96, 0.21_39]], [3, 1_000, [0.12_14, 0.03_52, -0.07_31, -0.15_62, -0.09_94, -0.09_06, -0.23_40, -0.05_39]], # fmt: on ] ) def a__ ( self: int , __a: Optional[Any] , __a: str , __a: List[Any] )-> Any: lowerCamelCase , lowerCamelCase : Optional[Any] = 
self.get_unet_model(model_id="""stabilityai/stable-diffusion-2""" , fpaa=_lowercase ) lowerCamelCase : Dict = self.get_latents(_lowercase , shape=(4, 4, 96, 96) , fpaa=_lowercase ) lowerCamelCase : Union[str, Any] = self.get_encoder_hidden_states(_lowercase , shape=(4, 77, 1_024) , fpaa=_lowercase ) lowerCamelCase : Any = model.apply( {"""params""": params} , _lowercase , jnp.array(_lowercase , dtype=jnp.intaa ) , encoder_hidden_states=_lowercase , ).sample assert sample.shape == latents.shape lowerCamelCase : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa ) lowerCamelCase : str = jnp.array(_lowercase , dtype=jnp.floataa ) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(_lowercase , _lowercase , atol=1e-2 )
702
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class A__ : """simple docstring""" def __init__( self: List[Any] , __a: List[str] , __a: Optional[int]=13 , __a: List[str]=32 , __a: int=2 , __a: List[str]=3 , __a: Union[str, Any]=16 , __a: int=[32, 64, 128] , __a: Optional[Any]=[1, 2, 1] , __a: Optional[int]=[2, 2, 4] , __a: Tuple=2 , __a: Dict=2.0 , __a: List[str]=True , __a: Optional[Any]=0.0 , __a: Any=0.0 , __a: List[Any]=0.1 , __a: List[str]="gelu" , __a: Tuple=False , __a: Union[str, Any]=True , __a: Optional[int]=0.02 , __a: Tuple=1e-5 , __a: int=True , __a: List[Any]=None , __a: Optional[int]=True , __a: Dict=10 , __a: List[str]=8 , __a: Any=["stage1", "stage2"] , __a: Union[str, Any]=[1, 2] , )-> Dict: lowerCamelCase : Dict = parent lowerCamelCase : Optional[Any] = batch_size lowerCamelCase : Union[str, Any] = image_size lowerCamelCase : Optional[int] = patch_size lowerCamelCase : Any = num_channels lowerCamelCase : Any = embed_dim lowerCamelCase : Dict = hidden_sizes lowerCamelCase : List[Any] = depths lowerCamelCase : Tuple = num_heads lowerCamelCase : List[Any] = window_size lowerCamelCase : str = mlp_ratio lowerCamelCase : str = qkv_bias lowerCamelCase : str = hidden_dropout_prob lowerCamelCase : Dict = attention_probs_dropout_prob lowerCamelCase : Tuple = drop_path_rate lowerCamelCase : Dict = hidden_act lowerCamelCase : Tuple = use_absolute_embeddings lowerCamelCase : List[str] = patch_norm lowerCamelCase : List[str] = layer_norm_eps lowerCamelCase : str = initializer_range lowerCamelCase : Tuple = is_training lowerCamelCase : int = scope lowerCamelCase : Union[str, Any] = use_labels lowerCamelCase : List[str] = type_sequence_label_size lowerCamelCase : str = encoder_stride lowerCamelCase : List[str] = out_features lowerCamelCase : Optional[int] = out_indices def a__ ( self: Optional[Any] )-> Union[str, Any]: lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCamelCase : str = None if self.use_labels: lowerCamelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : str = self.get_config() return config, pixel_values, labels def a__ ( self: List[Any] )-> Optional[int]: return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def a__ ( self: Tuple , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Tuple = model(__a ) lowerCamelCase : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowerCamelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def a__ ( self: Optional[int] , __a: Dict , __a: Tuple , __a: List[Any] )-> int: lowerCamelCase : List[Any] = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Optional[Any] = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowerCamelCase : Dict = None lowerCamelCase : Dict = FocalNetBackbone(config=__a ) model.to(__a ) model.eval() lowerCamelCase : Any = model(__a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def a__ ( self: Optional[int] , __a: Optional[int] , __a: Optional[int] , __a: Optional[int] )-> List[str]: lowerCamelCase : Tuple = FocalNetForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a ) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowerCamelCase : List[str] = 1 lowerCamelCase : Any = FocalNetForMaskedImageModeling(__a ) model.to(__a ) model.eval() lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Tuple = model(__a ) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def a__ ( self: str , __a: Optional[Any] , __a: Optional[Any] , __a: Tuple )-> str: lowerCamelCase : Optional[Any] = self.type_sequence_label_size lowerCamelCase : Optional[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : List[str] = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCamelCase : int = 1 lowerCamelCase : List[Any] = FocalNetForImageClassification(__a ) model.to(__a ) model.eval() lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def a__ ( self: int )-> Optional[int]: 
lowerCamelCase : str = self.prepare_config_and_inputs() lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = config_and_inputs lowerCamelCase : List[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) snake_case__ : Optional[int] =( {'''feature-extraction''': FocalNetModel, '''image-classification''': FocalNetForImageClassification} if is_torch_available() else {} ) snake_case__ : Tuple =False snake_case__ : Dict =False snake_case__ : Dict =False snake_case__ : Tuple =False snake_case__ : Optional[int] =False def a__ ( self: Union[str, Any] )-> Optional[int]: lowerCamelCase : List[str] = FocalNetModelTester(self ) lowerCamelCase : Optional[Any] = ConfigTester(self , config_class=__a , embed_dim=37 , has_text_modality=__a ) def a__ ( self: List[str] )-> List[str]: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a__ ( self: List[str] )-> Union[str, Any]: return def a__ ( self: Tuple )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[Any] )-> Dict: lowerCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__a ) def a__ ( self: List[Any] )-> Tuple: lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def a__ ( self: List[str] )-> Dict: lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @unittest.skip(reason="""FocalNet does not use inputs_embeds""" ) def a__ ( self: Optional[Any] )-> str: pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""" ) def a__ ( self: Optional[Any] )-> Dict: pass def a__ ( self: Optional[Any] )-> Dict: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : Any = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowerCamelCase : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def a__ ( self: Tuple )-> Optional[int]: lowerCamelCase , lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowerCamelCase : int = model_class(__a ) lowerCamelCase : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCamelCase : Any = [*signature.parameters.keys()] lowerCamelCase : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , __a ) def a__ ( self: str , __a: Union[str, Any] , __a: int , __a: Tuple , __a: List[str] )-> Union[str, Any]: lowerCamelCase : List[Any] = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): lowerCamelCase 
: List[str] = model(**self._prepare_for_class(__a , __a ) ) lowerCamelCase : List[str] = outputs.hidden_states lowerCamelCase : Tuple = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # FocalNet has a different seq_length lowerCamelCase : Tuple = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[int] = reshaped_hidden_states[0].shape lowerCamelCase : Tuple = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def a__ ( self: Any )-> Any: lowerCamelCase , lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowerCamelCase : List[str] = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : List[Any] = True self.check_hidden_states_output(__a , __a , __a , __a ) def a__ ( self: str )-> Union[str, Any]: lowerCamelCase , lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : List[str] = 3 lowerCamelCase : Any = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowerCamelCase : Optional[int] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowerCamelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowerCamelCase : str = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCamelCase : Union[str, Any] = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) @slow def a__ ( self: Optional[int] )-> List[Any]: for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : List[str] = FocalNetModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> Any: lowerCamelCase , lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowerCamelCase : int = _config_zero_init(__a ) for model_class in self.all_model_classes: lowerCamelCase : int = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems 
not properly initialized' , ) @require_vision @require_torch class A__ ( unittest.TestCase): """simple docstring""" @cached_property def a__ ( self: Optional[int] )-> Optional[Any]: # TODO update organization return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None @slow def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Tuple = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__a ) lowerCamelCase : Any = self.default_image_processor lowerCamelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) lowerCamelCase : int = image_processor(images=__a , return_tensors="""pt""" ).to(__a ) # forward pass with torch.no_grad(): lowerCamelCase : Any = model(**__a ) # verify the logits lowerCamelCase : Tuple = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , __a ) lowerCamelCase : List[str] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) ) self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 281 ) @require_torch class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : str =(FocalNetBackbone,) if is_torch_available() else () snake_case__ : Optional[int] =FocalNetConfig snake_case__ : str =False def a__ ( self: Union[str, Any] )-> Tuple: lowerCamelCase : str = FocalNetModelTester(self )
42
0
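The FocalNet checks above repeatedly use the same stage arithmetic: each stage halves the patch grid per side (4x fewer patches) and doubles the embedding width. A small sketch of that bookkeeping (hypothetical helper; the numbers match the tester defaults of image_size=32, patch_size=2, embed_dim=16 and three stages):

def focalnet_final_shape(image_size, patch_size, embed_dim, num_stages):
    num_patches = (image_size // patch_size) ** 2
    seq_len = num_patches // 4 ** (num_stages - 1)   # 4x fewer patches per stage
    hidden_dim = embed_dim * 2 ** (num_stages - 1)   # width doubles per stage
    return seq_len, hidden_dim

assert focalnet_final_shape(32, 2, 16, 3) == (16, 64)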
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging __lowerCamelCase :List[str] = logging.get_logger(__name__) class A__ ( snake_case__): """simple docstring""" snake_case__ : int =CLIPConfig snake_case__ : Dict =['''CLIPEncoderLayer'''] def __init__( self: Any , __a: int )-> Optional[int]: super().__init__(lowercase_ ) lowerCamelCase : Dict = CLIPVisionModelWithProjection(config.vision_config ) lowerCamelCase : List[str] = nn.Linear(config.vision_config.projection_dim , 1 ) lowerCamelCase : Dict = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def a__ ( self: Tuple , __a: List[str] , __a: List[Any] , __a: Optional[int]=0.5 , __a: int=0.5 )-> Optional[int]: lowerCamelCase : Any = self.vision_model(lowercase_ )[0] lowerCamelCase : Optional[int] = self.p_head(lowercase_ ) lowerCamelCase : Any = nsfw_detected.flatten() lowerCamelCase : Dict = nsfw_detected > p_threshold lowerCamelCase : Optional[int] = nsfw_detected.tolist() if any(lowercase_ ): logger.warning( """Potential NSFW content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""" ) for idx, nsfw_detected_ in enumerate(lowercase_ ): if nsfw_detected_: lowerCamelCase : Any = np.zeros(images[idx].shape ) lowerCamelCase : List[str] = self.w_head(lowercase_ ) lowerCamelCase : Union[str, Any] = watermark_detected.flatten() lowerCamelCase : str = watermark_detected > w_threshold lowerCamelCase : str = watermark_detected.tolist() if any(lowercase_ ): logger.warning( """Potential watermarked content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""" ) for idx, watermark_detected_ in enumerate(lowercase_ ): if watermark_detected_: lowerCamelCase : List[Any] = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
703
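The screening flow in the checker above reduces to: score each image, threshold, and blank out flagged entries in place. A toy sketch of that pattern with fabricated scores (illustration only, not the actual model heads):

import numpy as np

scores = np.array([0.1, 0.9])                      # fabricated per-image scores
images = [np.ones((8, 8, 3)), np.ones((8, 8, 3))]  # stand-in images
flagged = (scores > 0.5).tolist()
for idx, hit in enumerate(flagged):
    if hit:
        images[idx] = np.zeros(images[idx].shape)  # replace with a black image
assert images[0].sum() > 0 and images[1].sum() == 0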
"""simple docstring""" import os def snake_case ( ) -> Optional[Any]: with open(os.path.dirname(UpperCamelCase__ ) + """/grid.txt""" ) as f: lowerCamelCase : int = [] # noqa: E741 for _ in range(20 ): l.append([int(UpperCamelCase__ ) for x in f.readline().split()] ) lowerCamelCase : Union[str, Any] = 0 # right for i in range(20 ): for j in range(17 ): lowerCamelCase : Dict = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3] if temp > maximum: lowerCamelCase : Tuple = temp # down for i in range(17 ): for j in range(20 ): lowerCamelCase : Any = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j] if temp > maximum: lowerCamelCase : Optional[Any] = temp # diagonal 1 for i in range(17 ): for j in range(17 ): lowerCamelCase : List[Any] = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3] if temp > maximum: lowerCamelCase : List[str] = temp # diagonal 2 for i in range(17 ): for j in range(3 , 20 ): lowerCamelCase : List[str] = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3] if temp > maximum: lowerCamelCase : List[Any] = temp return maximum if __name__ == "__main__": print(solution())
42
0
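The Project Euler solution above spells out the four scan directions by hand. An equivalent, more compact formulation iterates over direction vectors (hypothetical helper name; behavior matches the code above for a square grid):

def max_product(grid, run=4):
    n = len(grid)
    best = 0
    # right, down, down-right diagonal, down-left diagonal
    for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):
        for i in range(n):
            for j in range(n):
                if 0 <= i + (run - 1) * di < n and 0 <= j + (run - 1) * dj < n:
                    product = 1
                    for k in range(run):
                        product *= grid[i + k * di][j + k * dj]
                    best = max(best, product)
    return best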
"""simple docstring""" from __future__ import annotations import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class A__ : """simple docstring""" snake_case__ : Optional[Any] =BlenderbotConfig snake_case__ : List[Any] ={} snake_case__ : int ='''gelu''' def __init__( self: Any , __a: str , __a: Any=13 , __a: List[str]=7 , __a: Any=True , __a: Optional[Any]=False , __a: Tuple=99 , __a: Optional[int]=32 , __a: str=2 , __a: Dict=4 , __a: Dict=37 , __a: Tuple=0.1 , __a: Optional[int]=0.1 , __a: Optional[Any]=20 , __a: Optional[int]=2 , __a: Union[str, Any]=1 , __a: int=0 , )-> Tuple: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : str = batch_size lowerCamelCase : Optional[Any] = seq_length lowerCamelCase : str = is_training lowerCamelCase : Any = use_labels lowerCamelCase : List[str] = vocab_size lowerCamelCase : Any = hidden_size lowerCamelCase : Union[str, Any] = num_hidden_layers lowerCamelCase : Optional[Any] = num_attention_heads lowerCamelCase : Union[str, Any] = intermediate_size lowerCamelCase : int = hidden_dropout_prob lowerCamelCase : List[Any] = attention_probs_dropout_prob lowerCamelCase : Dict = max_position_embeddings lowerCamelCase : Any = eos_token_id lowerCamelCase : str = pad_token_id lowerCamelCase : Optional[Any] = bos_token_id def a__ ( self: Any )-> Optional[Any]: lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowerCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 ) lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : List[str] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowerCamelCase : str = prepare_blenderbot_inputs_dict(A_ , A_ , A_ ) return config, inputs_dict def a__ ( self: Any , __a: int , __a: Tuple )-> List[Any]: lowerCamelCase : Dict = TFBlenderbotModel(config=A_ ).get_decoder() lowerCamelCase : List[str] = inputs_dict["""input_ids"""] lowerCamelCase : Optional[int] = input_ids[:1, :] lowerCamelCase : Any = inputs_dict["""attention_mask"""][:1, :] lowerCamelCase : Union[str, Any] = inputs_dict["""head_mask"""] lowerCamelCase : List[Any] = 1 # first forward pass lowerCamelCase : int = model(A_ , attention_mask=A_ , head_mask=A_ , use_cache=A_ ) lowerCamelCase , lowerCamelCase : Union[str, Any] = outputs.to_tuple() # create hypothetical next token 
and extent to next_input_ids lowerCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowerCamelCase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowerCamelCase : int = tf.concat([input_ids, next_tokens] , axis=-1 ) lowerCamelCase : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowerCamelCase : Optional[int] = model(A_ , attention_mask=A_ )[0] lowerCamelCase : Dict = model(A_ , attention_mask=A_ , past_key_values=A_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowerCamelCase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowerCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx] lowerCamelCase : List[Any] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(A_ , A_ , rtol=1e-3 ) def snake_case ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : int=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[Any]=None , ) -> List[Any]: if attention_mask is None: lowerCamelCase : Any = tf.cast(tf.math.not_equal(UpperCamelCase__ , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: lowerCamelCase : Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: lowerCamelCase : Dict = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: lowerCamelCase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: lowerCamelCase : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class A__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =(TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () snake_case__ : Optional[int] =(TFBlenderbotForConditionalGeneration,) if is_tf_available() else () snake_case__ : Optional[Any] =( { '''conversational''': TFBlenderbotForConditionalGeneration, '''feature-extraction''': TFBlenderbotModel, '''summarization''': TFBlenderbotForConditionalGeneration, '''text2text-generation''': TFBlenderbotForConditionalGeneration, '''translation''': TFBlenderbotForConditionalGeneration, } if is_tf_available() else {} ) snake_case__ : Optional[Any] =True snake_case__ : Any =False snake_case__ : Optional[Any] =False def a__ ( self: str )-> List[str]: lowerCamelCase : Tuple = TFBlenderbotModelTester(self ) lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=A_ ) def a__ ( self: List[Any] )-> str: self.config_tester.run_common_tests() def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*A_ ) @require_tokenizers @require_tf class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] 
=['''My friends are cool but they eat too many carbs.'''] snake_case__ : Any ='''facebook/blenderbot-400M-distill''' @cached_property def a__ ( self: str )-> Optional[int]: return BlenderbotTokenizer.from_pretrained(self.model_name ) @cached_property def a__ ( self: Tuple )-> List[str]: lowerCamelCase : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def a__ ( self: List[Any] )-> str: lowerCamelCase : List[Any] = self.tokenizer(self.src_text , return_tensors="""tf""" ) lowerCamelCase : int = self.model.generate( model_inputs.input_ids , ) lowerCamelCase : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=A_ )[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
704
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin __lowerCamelCase :Any = False @skip_mps class A__ ( __lowercase , __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Optional[Any] =StableDiffusionAttendAndExcitePipeline snake_case__ : Any =False snake_case__ : Dict =TEXT_TO_IMAGE_PARAMS snake_case__ : Any =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''}) snake_case__ : Dict =TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ : str =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def a__ ( cls: Dict )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Union[str, Any] )-> Any: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: Tuple )-> Union[str, Any]: torch.manual_seed(0 ) lowerCamelCase : str = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , ) lowerCamelCase : Union[str, Any] = DDIMScheduler( beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=__a , set_alpha_to_one=__a , ) torch.manual_seed(0 ) lowerCamelCase : Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) lowerCamelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="""gelu""" , projection_dim=512 , ) lowerCamelCase : Optional[int] = CLIPTextModel(__a ) lowerCamelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) lowerCamelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def a__ ( self: Tuple , __a: int , __a: Union[str, Any]=0 )-> Optional[Any]: if str(__a ).startswith("""mps""" ): lowerCamelCase : Tuple = torch.manual_seed(__a ) else: lowerCamelCase : str = torch.Generator(device=__a ).manual_seed(__a ) lowerCamelCase : Dict = { """prompt""": """a cat and a frog""", """token_indices""": [2, 5], """generator""": generator, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", """max_iter_to_alter""": 2, """thresholds""": {0: 0.7}, } return inputs def a__ ( self: Dict )-> str: lowerCamelCase : Tuple = """cpu""" lowerCamelCase : List[str] = self.get_dummy_components() lowerCamelCase : List[Any] = self.pipeline_class(**__a 
) pipe.to(__a ) pipe.set_progress_bar_config(disable=__a ) lowerCamelCase : Any = self.get_dummy_inputs(__a ) lowerCamelCase : Union[str, Any] = pipe(**__a ).images lowerCamelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 64, 64, 3) ) lowerCamelCase : Optional[Any] = np.array( [0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] ) lowerCamelCase : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__a , 1e-3 ) def a__ ( self: int )-> Optional[Any]: super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 ) def a__ ( self: Union[str, Any] )-> Optional[int]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def a__ ( self: Tuple )-> int: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 ) def a__ ( self: Dict )-> List[Any]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def a__ ( self: Optional[int] )-> Dict: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 ) def a__ ( self: Any )-> Tuple: super().test_save_load_local(expected_max_difference=5e-4 ) def a__ ( self: str )-> str: super().test_save_load_optional_components(expected_max_difference=4e-4 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): """simple docstring""" @classmethod def a__ ( cls: Any )-> Tuple: super().setUpClass() torch.use_deterministic_algorithms(__a ) @classmethod def a__ ( cls: Dict )-> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(__a ) def a__ ( self: int )-> Optional[int]: super().tearDown() gc.collect() torch.cuda.empty_cache() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = torch.manual_seed(51 ) lowerCamelCase : List[str] = StableDiffusionAttendAndExcitePipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , safety_checker=__a , torch_dtype=torch.floataa ) pipe.to("""cuda""" ) lowerCamelCase : Dict = """a painting of an elephant with glasses""" lowerCamelCase : Any = [5, 7] lowerCamelCase : Tuple = pipe( prompt=__a , token_indices=__a , guidance_scale=7.5 , generator=__a , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0] lowerCamelCase : Union[str, Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" ) assert np.abs((expected_image - image).max() ) < 5e-1
42
0
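A minimal, de-obfuscated sketch of the integration check exercised by the code field of the row above. The checkpoint name and the source sentence come from the test itself; everything else is illustrative and assumes a transformers install with the TensorFlow extras and network access to download the checkpoint.

from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

# Tokenize the prompt and let the model produce a reply.
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
generated_ids = model.generate(inputs.input_ids)
# skip_special_tokens drops the <s>/</s> markers before the string comparison.
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0])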
"""simple docstring""" from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax __lowerCamelCase :int = logging.get_logger(__name__) @add_end_docstrings(__lowercase) class A__ ( __lowercase): """simple docstring""" def __init__( self: Any , **__a: Any )-> str: super().__init__(**__a ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self: List[str] , __a: Union[str, List[str], "Image", List["Image"]] , **__a: Dict )-> Union[str, Any]: return super().__call__(__a , **__a ) def a__ ( self: int , **__a: Any )-> Dict: lowerCamelCase : Dict = {} if "candidate_labels" in kwargs: lowerCamelCase : List[str] = kwargs["candidate_labels"] if "hypothesis_template" in kwargs: lowerCamelCase : List[Any] = kwargs["hypothesis_template"] return preprocess_params, {}, {} def a__ ( self: Tuple , __a: Tuple , __a: Tuple=None , __a: Optional[Any]="This is a photo of {}." )-> List[str]: lowerCamelCase : Optional[int] = load_image(__a ) lowerCamelCase : Union[str, Any] = self.image_processor(images=[image] , return_tensors=self.framework ) lowerCamelCase : Optional[Any] = candidate_labels lowerCamelCase : str = [hypothesis_template.format(__a ) for x in candidate_labels] lowerCamelCase : List[str] = self.tokenizer(__a , return_tensors=self.framework , padding=__a ) lowerCamelCase : Tuple = [text_inputs] return inputs def a__ ( self: int , __a: str )-> List[Any]: lowerCamelCase : int = model_inputs.pop("""candidate_labels""" ) lowerCamelCase : Optional[int] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , __a ): lowerCamelCase : int = text_inputs[0] else: # Batching case. lowerCamelCase : Optional[int] = text_inputs[0][0] lowerCamelCase : Any = self.model(**__a , **__a ) lowerCamelCase : Union[str, Any] = { "candidate_labels": candidate_labels, "logits": outputs.logits_per_image, } return model_outputs def a__ ( self: Optional[int] , __a: Union[str, Any] )-> str: lowerCamelCase : List[str] = model_outputs.pop("""candidate_labels""" ) lowerCamelCase : Optional[Any] = model_outputs["logits"][0] if self.framework == "pt": lowerCamelCase : str = logits.softmax(dim=-1 ).squeeze(-1 ) lowerCamelCase : Any = probs.tolist() if not isinstance(__a , __a ): lowerCamelCase : Optional[int] = [scores] elif self.framework == "tf": lowerCamelCase : Union[str, Any] = stable_softmax(__a , axis=-1 ) lowerCamelCase : str = probs.numpy().tolist() else: raise ValueError(f'Unsupported framework: {self.framework}' ) lowerCamelCase : Any = [ {"score": score, "label": candidate_label} for score, candidate_label in sorted(zip(__a , __a ) , key=lambda __a : -x[0] ) ] return result
705
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class A__ : """simple docstring""" def __init__( self: List[str] , __a: List[str] , __a: Dict=13 , __a: Tuple=7 , __a: Dict=False , __a: str=True , __a: List[Any]=False , __a: Dict=True , __a: Any=33 , __a: Optional[Any]=32 , __a: List[Any]=5 , __a: Any=4 , __a: Dict=37 , __a: str="gelu" , __a: str=0.1 , __a: int=0.1 , __a: Optional[int]=512 , __a: List[Any]=16 , __a: int=2 , __a: int=0.02 , __a: Optional[int]=3 , __a: str=4 , __a: Tuple=None , )-> Tuple: lowerCamelCase : Union[str, Any] = parent lowerCamelCase : Tuple = batch_size lowerCamelCase : Any = seq_length lowerCamelCase : Any = is_training lowerCamelCase : Tuple = use_input_mask lowerCamelCase : int = use_token_type_ids lowerCamelCase : List[str] = use_labels lowerCamelCase : Optional[int] = vocab_size lowerCamelCase : Tuple = hidden_size lowerCamelCase : List[str] = num_hidden_layers lowerCamelCase : Optional[int] = num_attention_heads lowerCamelCase : Optional[Any] = intermediate_size lowerCamelCase : Optional[Any] = hidden_act lowerCamelCase : Union[str, Any] = hidden_dropout_prob lowerCamelCase : Optional[Any] = attention_probs_dropout_prob lowerCamelCase : Any = max_position_embeddings lowerCamelCase : str = type_vocab_size lowerCamelCase : List[Any] = type_sequence_label_size lowerCamelCase : Optional[Any] = initializer_range lowerCamelCase : Union[str, Any] = num_labels lowerCamelCase : Optional[Any] = num_choices lowerCamelCase : Any = scope def a__ ( self: Optional[int] )-> List[Any]: lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase : Dict = None if self.use_input_mask: lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase : Any = None lowerCamelCase : int = None lowerCamelCase : Union[str, Any] = None if self.use_labels: lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase : List[str] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a__ ( self: Tuple )-> Union[str, Any]: return EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def a__ ( self: List[Any] , __a: List[str] , __a: str , __a: Tuple , __a: List[str] , __a: List[str] , 
__a: str )-> int: lowerCamelCase : Optional[int] = EsmModel(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a ) lowerCamelCase : str = model(__a ) lowerCamelCase : Optional[Any] = model(__a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def a__ ( self: int , __a: Union[str, Any] , __a: Optional[int] , __a: List[str] , __a: str , __a: List[str] , __a: Tuple )-> int: lowerCamelCase : str = EsmForMaskedLM(config=__a ) model.to(__a ) model.eval() lowerCamelCase : List[Any] = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self: List[str] , __a: List[Any] , __a: List[str] , __a: int , __a: Union[str, Any] , __a: List[Any] , __a: Tuple )-> List[str]: lowerCamelCase : Tuple = self.num_labels lowerCamelCase : Dict = EsmForTokenClassification(config=__a ) model.to(__a ) model.eval() lowerCamelCase : int = model(__a , attention_mask=__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Any = self.prepare_config_and_inputs() ( ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ( lowerCamelCase ) , ) : Tuple = config_and_inputs lowerCamelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class A__ ( __lowercase , __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : Any =False snake_case__ : Dict =( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) snake_case__ : Dict =() snake_case__ : Optional[int] =( { '''feature-extraction''': EsmModel, '''fill-mask''': EsmForMaskedLM, '''text-classification''': EsmForSequenceClassification, '''token-classification''': EsmForTokenClassification, '''zero-shot''': EsmForSequenceClassification, } if is_torch_available() else {} ) snake_case__ : Any =True def a__ ( self: Optional[int] )-> Optional[int]: lowerCamelCase : Optional[Any] = EsmModelTester(self ) lowerCamelCase : Any = ConfigTester(self , config_class=__a , hidden_size=37 ) def a__ ( self: List[Any] )-> Optional[Any]: self.config_tester.run_common_tests() def a__ ( self: int )-> Optional[Any]: lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) def a__ ( self: Tuple )-> Any: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowerCamelCase : Tuple = type self.model_tester.create_and_check_model(*__a ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__a ) def a__ ( self: int )-> Optional[Any]: lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*__a ) @slow def a__ ( self: Any )-> List[Any]: for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase : int = EsmModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def a__ ( self: str )-> List[str]: lowerCamelCase : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Union[str, Any] = EsmEmbeddings(config=__a ) lowerCamelCase : List[str] = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowerCamelCase : Union[str, Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowerCamelCase : Optional[Any] = create_position_ids_from_input_ids(__a , model.padding_idx ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) def a__ ( self: Optional[int] )-> int: lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowerCamelCase : Any = EsmEmbeddings(config=__a ) lowerCamelCase : Dict = torch.empty(2 , 4 , 30 ) lowerCamelCase : List[Any] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowerCamelCase : Any = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowerCamelCase : List[str] = embeddings.create_position_ids_from_inputs_embeds(__a ) self.assertEqual(position_ids.shape , expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__a , __a ) ) ) @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Any )-> Optional[Any]: pass @unittest.skip("""Esm does not support embedding resizing""" ) def a__ ( self: Dict )-> Dict: pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def a__ ( self: List[str] )-> Dict: pass @require_torch class A__ ( __lowercase): """simple docstring""" @slow def a__ ( self: Any )-> Union[str, Any]: with torch.no_grad(): lowerCamelCase : Union[str, Any] = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowerCamelCase : Tuple = model(__a )[0] lowerCamelCase : Dict = 33 lowerCamelCase : List[str] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape , __a ) lowerCamelCase : Tuple = torch.tensor( [[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) ) @slow def a__ ( self: Dict )-> str: with torch.no_grad(): lowerCamelCase : Any = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" ) model.eval() lowerCamelCase : Optional[Any] = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowerCamelCase : Any = model(__a )[0] # compare the actual values for a slice. lowerCamelCase : Tuple = torch.tensor( [[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
42
0
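For reference, a short usage sketch of the zero-shot image classification pipeline defined in this row's code field. The CLIP checkpoint, image path, and labels are illustrative assumptions; the keyword arguments match the preprocess parameters the pipeline accepts.

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "cat.jpg",  # a local path, a URL, or a PIL.Image
    candidate_labels=["cat", "dog", "bird"],
    hypothesis_template="This is a photo of {}.",  # formatted once per candidate label
)
# The pipeline returns [{"score": float, "label": str}, ...] sorted by descending score.
print(predictions[0])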
"""simple docstring""" import numpy as np def snake_case ( UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Tuple: lowerCamelCase : str = int(np.ceil((x_end - xa) / h ) ) lowerCamelCase : List[str] = np.zeros((n + 1,) ) lowerCamelCase : List[str] = ya lowerCamelCase : Optional[int] = xa for k in range(_SCREAMING_SNAKE_CASE ): lowerCamelCase : int = f(_SCREAMING_SNAKE_CASE , y[k] ) lowerCamelCase : Optional[int] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowerCamelCase : List[Any] = f(x + 0.5 * h , y[k] + 0.5 * h * ka ) lowerCamelCase : Optional[Any] = f(x + h , y[k] + h * ka ) lowerCamelCase : Dict = y[k] + (1 / 6) * h * (ka + 2 * ka + 2 * ka + ka) x += h return y if __name__ == "__main__": import doctest doctest.testmod()
706
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase :str = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class A__ ( __lowercase , unittest.TestCase): """simple docstring""" snake_case__ : List[str] =AlbertTokenizer snake_case__ : Optional[Any] =AlbertTokenizerFast snake_case__ : Optional[int] =True snake_case__ : Any =True snake_case__ : Optional[int] =True def a__ ( self: Dict )-> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase : int = AlbertTokenizer(__a ) tokenizer.save_pretrained(self.tmpdirname ) def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]: lowerCamelCase : List[str] = """this is a test""" lowerCamelCase : int = """this is a test""" return input_text, output_text def a__ ( self: Any )-> List[Any]: lowerCamelCase : int = """<pad>""" lowerCamelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a ) def a__ ( self: Tuple )-> str: lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(__a ) , 30_000 ) def a__ ( self: List[str] )-> Any: self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def a__ ( self: Optional[Any] )-> Union[str, Any]: if not self.test_rust_tokenizer: return lowerCamelCase : str = self.get_tokenizer() lowerCamelCase : Tuple = self.get_rust_tokenizer() lowerCamelCase : Union[str, Any] = """I was born in 92000, and this is falsé.""" lowerCamelCase : List[str] = tokenizer.tokenize(__a ) lowerCamelCase : Tuple = rust_tokenizer.tokenize(__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Dict = tokenizer.encode(__a , add_special_tokens=__a ) lowerCamelCase : List[str] = rust_tokenizer.encode(__a , add_special_tokens=__a ) self.assertListEqual(__a , __a ) lowerCamelCase : Any = self.get_rust_tokenizer() lowerCamelCase : List[str] = tokenizer.encode(__a ) lowerCamelCase : str = rust_tokenizer.encode(__a ) self.assertListEqual(__a , __a ) def a__ ( self: Tuple )-> List[Any]: lowerCamelCase : List[str] = AlbertTokenizer(__a , keep_accents=__a ) lowerCamelCase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(__a , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , [48, 25, 21, 1_289] ) lowerCamelCase : List[str] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) lowerCamelCase : List[str] = tokenizer.convert_tokens_to_ids(__a ) self.assertListEqual(__a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(__a ) self.assertListEqual( __a , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def a__ ( self: Tuple )-> str: lowerCamelCase : str = AlbertTokenizer(__a ) lowerCamelCase : Union[str, Any] = 
tokenizer.encode("""sequence builders""" ) lowerCamelCase : List[Any] = tokenizer.encode("""multi-sequence build""" ) lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(__a ) lowerCamelCase : str = tokenizer.build_inputs_with_special_tokens(__a , __a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def a__ ( self: Any )-> Dict: # fmt: off lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__a , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
42
0
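The code field of this row stores a Runge-Kutta integrator whose distinct slope variables were flattened by the identifier anonymization; below is a de-obfuscated, runnable sketch of the same classical fourth-order scheme (all names here are illustrative).

import numpy as np

def runge_kutta4(f, x0, y0, x_end, h):
    """Integrate y' = f(x, y) from x0 to x_end with fixed step h; returns the y grid."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y

# y' = y with y(0) = 1 on [0, 1]: the endpoint should approximate e ~ 2.71828.
print(runge_kutta4(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)[-1])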
import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. __lowerCamelCase :Tuple = {'LayoutLMv2Config', 'LayoutLMv3Config'} @is_pipeline_test class A__ ( unittest.TestCase): """simple docstring""" snake_case__ : List[Any] =MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING snake_case__ : Union[str, Any] =TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: snake_case__ : Dict ={config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: snake_case__ : List[str] ={ config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def a__ ( self: List[str] )-> Optional[Any]: lowerCamelCase : Optional[Any] = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) lowerCamelCase : Tuple = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] ) lowerCamelCase : int = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(__a ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}] ) lowerCamelCase : List[str] = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(__a ) , [ [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}], [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}], ] , ) lowerCamelCase : int = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] ) # Legacy behavior lowerCamelCase : Optional[Any] = text_classifier("""This is great !""" , return_all_scores=__a ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] ) lowerCamelCase : List[str] = text_classifier("""This is great !""" , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [[{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}]] ) lowerCamelCase : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [ [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}], [{"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_1""", """score""": 0.4_96}], ] , ) lowerCamelCase : List[str] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=__a ) self.assertEqual( nested_simplify(__a ) , [ {"""label""": """LABEL_0""", """score""": 0.5_04}, {"""label""": """LABEL_0""", """score""": 0.5_04}, ] , ) @require_torch def a__ ( self: Dict )-> List[str]: import torch lowerCamelCase : Tuple = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) lowerCamelCase : str = text_classifier("""This is great !""" ) 
self.assertEqual(nested_simplify(__a ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] ) @require_tf def a__ ( self: Optional[Any] )-> int: lowerCamelCase : Any = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) lowerCamelCase : Tuple = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """LABEL_0""", """score""": 0.5_04}] ) @slow @require_torch def a__ ( self: Dict )-> Dict: lowerCamelCase : str = pipeline("""text-classification""" ) lowerCamelCase : str = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) lowerCamelCase : Tuple = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) lowerCamelCase : Optional[int] = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] ) @slow @require_tf def a__ ( self: Dict )-> List[str]: lowerCamelCase : Optional[Any] = pipeline("""text-classification""" , framework="""tf""" ) lowerCamelCase : str = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) lowerCamelCase : Dict = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) lowerCamelCase : str = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(__a ) , [{"""label""": """POSITIVE""", """score""": 0.9_88}] ) def a__ ( self: str , __a: str , __a: int , __a: Dict )-> Tuple: lowerCamelCase : List[str] = TextClassificationPipeline(model=__a , tokenizer=__a ) return text_classifier, ["HuggingFace is in", "This is another test"] def a__ ( self: Optional[int] , __a: List[Any] , __a: List[str] )-> Optional[int]: lowerCamelCase : Dict = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 lowerCamelCase : int = """HuggingFace is in""" lowerCamelCase : Optional[Any] = text_classifier(__a ) self.assertEqual(nested_simplify(__a ) , [{"""label""": ANY(__a ), """score""": ANY(__a )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) lowerCamelCase : List[Any] = ["""HuggingFace is in """, """Paris is in France"""] lowerCamelCase : Dict = text_classifier(__a ) self.assertEqual( nested_simplify(__a ) , [{"""label""": ANY(__a ), """score""": ANY(__a )}, {"""label""": ANY(__a ), """score""": ANY(__a )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format lowerCamelCase : Any = text_classifier(__a , top_k=__a ) lowerCamelCase : Dict = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(__a ) , [[{"""label""": ANY(__a ), """score""": ANY(__a )}] * N, [{"""label""": ANY(__a ), """score""": ANY(__a )}] * N] , ) lowerCamelCase : Tuple = {"""text""": """HuggingFace is in """, """text_pair""": """Paris is in France"""} lowerCamelCase : Dict = text_classifier(__a ) self.assertEqual( nested_simplify(__a ) , {"""label""": ANY(__a ), """score""": ANY(__a )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # 
makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. lowerCamelCase : Union[str, Any] = [["""HuggingFace is in """, """Paris is in France"""]] with self.assertRaises(__a ): text_classifier(__a ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility lowerCamelCase : Any = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(__a ) , [{"""label""": ANY(__a ), """score""": ANY(__a )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
707
"""simple docstring""" __lowerCamelCase :List[Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} __lowerCamelCase :Union[str, Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : Tuple = True lowerCamelCase : Any = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) order.append(UpperCamelCase__ ) return order def snake_case ( UpperCamelCase__ : dict[int, list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : list[bool] ) -> list[int]: lowerCamelCase : List[Any] = True lowerCamelCase : int = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return component def snake_case ( UpperCamelCase__ : dict[int, list[int]] ) -> list[list[int]]: lowerCamelCase : int = len(UpperCamelCase__ ) * [False] lowerCamelCase : dict[int, list[int]] = {vert: [] for vert in range(len(UpperCamelCase__ ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(UpperCamelCase__ ) lowerCamelCase : int = [] for i, was_visited in enumerate(UpperCamelCase__ ): if not was_visited: order += topology_sort(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) lowerCamelCase : Tuple = [] lowerCamelCase : str = len(UpperCamelCase__ ) * [False] for i in range(len(UpperCamelCase__ ) ): lowerCamelCase : Any = order[len(UpperCamelCase__ ) - i - 1] if not visited[vert]: lowerCamelCase : List[str] = find_components(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) components_list.append(UpperCamelCase__ ) return components_list
42
0
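A brief usage sketch of the text-classification pipeline behaviours this row's code field tests. The tiny checkpoint name is taken from the test itself; the inputs are illustrative.

from transformers import pipeline

text_classifier = pipeline(
    task="text-classification",
    model="hf-internal-testing/tiny-random-distilbert",
    framework="pt",
)
print(text_classifier("This is great !"))           # [{"label": ..., "score": ...}]
print(text_classifier("This is great !", top_k=2))  # the two best labels per input
# top_k=None returns the full score distribution and supersedes the legacy
# return_all_scores=True flag exercised in the test.
print(text_classifier(["This is great !", "This is bad"], top_k=None))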
"""simple docstring""" def lowerCAmelCase ( UpperCamelCase__ : float , UpperCamelCase__ : float , UpperCamelCase__ : int ) -> List[str]: if principal <= 0: raise Exception("""Principal borrowed must be > 0""" ) if rate_per_annum < 0: raise Exception("""Rate of interest must be >= 0""" ) if years_to_repay <= 0 or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise Exception("""Years to repay must be an integer > 0""" ) # Yearly rate is divided by 12 to get monthly rate lowerCamelCase : int = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly lowerCamelCase : Optional[int] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
708
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase :str = logging.get_logger(__name__) __lowerCamelCase :Any = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class A__ ( __lowercase): """simple docstring""" snake_case__ : List[Any] ='''time_series_transformer''' snake_case__ : List[Any] ={ '''hidden_size''': '''d_model''', '''num_attention_heads''': '''encoder_attention_heads''', '''num_hidden_layers''': '''encoder_layers''', } def __init__( self: List[str] , __a: Optional[int] = None , __a: Optional[int] = None , __a: str = "student_t" , __a: str = "nll" , __a: int = 1 , __a: List[int] = [1, 2, 3, 4, 5, 6, 7] , __a: Optional[Union[str, bool]] = "mean" , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: int = 0 , __a: Optional[List[int]] = None , __a: Optional[List[int]] = None , __a: int = 32 , __a: int = 32 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: int = 2 , __a: bool = True , __a: str = "gelu" , __a: int = 64 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: float = 0.1 , __a: int = 100 , __a: float = 0.02 , __a: Tuple=True , **__a: str , )-> Any: # time series specific configuration lowerCamelCase : str = prediction_length lowerCamelCase : Optional[Any] = context_length or prediction_length lowerCamelCase : Tuple = distribution_output lowerCamelCase : Any = loss lowerCamelCase : List[Any] = input_size lowerCamelCase : int = num_time_features lowerCamelCase : Dict = lags_sequence lowerCamelCase : Optional[int] = scaling lowerCamelCase : int = num_dynamic_real_features lowerCamelCase : Tuple = num_static_real_features lowerCamelCase : Any = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : int = cardinality else: lowerCamelCase : Dict = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__a ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowerCamelCase : str = embedding_dimension else: lowerCamelCase : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCamelCase : Any = num_parallel_samples # Transformer architecture configuration lowerCamelCase : Any = input_size * len(__a ) + self._number_of_features lowerCamelCase : List[str] = d_model lowerCamelCase : Tuple = encoder_attention_heads lowerCamelCase : Optional[int] = decoder_attention_heads lowerCamelCase : Union[str, Any] = encoder_ffn_dim lowerCamelCase : str = decoder_ffn_dim lowerCamelCase : str = encoder_layers lowerCamelCase : Any = decoder_layers lowerCamelCase : Optional[int] = dropout lowerCamelCase : List[str] = attention_dropout lowerCamelCase : Tuple = activation_dropout lowerCamelCase : Optional[int] = encoder_layerdrop lowerCamelCase : int = decoder_layerdrop lowerCamelCase : Optional[int] = activation_function lowerCamelCase : Optional[Any] = init_std lowerCamelCase : Optional[Any] = use_cache super().__init__(is_encoder_decoder=__a , **__a ) @property def a__ ( self: int )-> int: return 
( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
42
0
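The code field of this row computes an equated monthly installment using the standard formula EMI = P * r * (1 + r)**n / ((1 + r)**n - 1), with monthly rate r and n monthly payments. A self-contained worked sketch (names are illustrative):

def monthly_installment(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    rate_per_month = rate_per_annum / 12        # yearly rate -> monthly rate
    number_of_payments = years_to_repay * 12    # years -> number of monthly payments
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )

# 25,000 borrowed at 12% annual interest over 3 years -> roughly 830.36 per month.
print(round(monthly_installment(25_000, 0.12, 3), 2))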
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase :Any = { 'configuration_autoformer': [ 'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AutoformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Tuple = [ 'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'AutoformerForPrediction', 'AutoformerModel', 'AutoformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_autoformer import ( AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, AutoformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_autoformer import ( AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, AutoformerForPrediction, AutoformerModel, AutoformerPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
709
"""simple docstring""" from __future__ import annotations __lowerCamelCase :int = 10 def snake_case ( UpperCamelCase__ : list[int] ) -> list[int]: lowerCamelCase : int = 1 lowerCamelCase : Union[str, Any] = max(UpperCamelCase__ ) while placement <= max_digit: # declare and initialize empty buckets lowerCamelCase : list[list] = [[] for _ in range(UpperCamelCase__ )] # split list_of_ints between the buckets for i in list_of_ints: lowerCamelCase : Any = int((i / placement) % RADIX ) buckets[tmp].append(UpperCamelCase__ ) # put each buckets' contents into list_of_ints lowerCamelCase : Dict = 0 for b in range(UpperCamelCase__ ): for i in buckets[b]: lowerCamelCase : List[str] = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
42
0
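The style_context field of this row carries an LSD radix sort; here is a compact de-obfuscated sketch of the same bucket-per-digit idea (base 10, non-negative integers only, as in the original).

def radix_sort(list_of_ints: list[int]) -> list[int]:
    RADIX = 10
    placement = 1
    while placement <= max(list_of_ints):
        # One bucket per possible digit value at the current position.
        buckets: list[list[int]] = [[] for _ in range(RADIX)]
        for value in list_of_ints:
            buckets[(value // placement) % RADIX].append(value)
        # Draining the buckets in order is stable, so lower digits stay sorted.
        list_of_ints = [value for bucket in buckets for value in bucket]
        placement *= RADIX
    return list_of_ints

print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]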
"""simple docstring""" def snake_case ( UpperCamelCase__ : str ) -> Union[str, Any]: if not all(char in """01""" for char in bin_string ): raise ValueError("""Non-binary value was passed to the function""" ) if not bin_string: raise ValueError("""Empty string was passed to the function""" ) lowerCamelCase : List[str] = """""" while len(UpperCamelCase__ ) % 3 != 0: lowerCamelCase : Union[str, Any] = """0""" + bin_string lowerCamelCase : Tuple = [ bin_string[index : index + 3] for index in range(len(UpperCamelCase__ ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: lowerCamelCase : int = 0 for index, val in enumerate(UpperCamelCase__ ): oct_val += int(2 ** (2 - index) * int(UpperCamelCase__ ) ) oct_string += str(UpperCamelCase__ ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
710
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=None ) -> Tuple: # set parameter of one layer assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match' lowerCamelCase : Dict = nn.Parameter(UpperCamelCase__ ) if bias is not None: assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match' lowerCamelCase : Any = nn.Parameter(UpperCamelCase__ ) def snake_case ( UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ) -> Union[str, Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Dict = np.asarray(weights[0] ) lowerCamelCase : List[Any] = np.asarray(weights[1] ) lowerCamelCase : List[str] = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any ) -> List[Any]: # set torch weights for 1-to-1 comparison lowerCamelCase : Tuple = np.asarray(weights[0] ) lowerCamelCase : Any = np.asarray(weights[1] ) lowerCamelCase : List[Any] = np.asarray(weights[2] ) lowerCamelCase : List[str] = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.key , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.self_attention.value , torch.tensor(UpperCamelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , UpperCamelCase__ ) , ) set_param( torch_layer.output.dense , torch.tensor(UpperCamelCase__ ).view(-1 , UpperCamelCase__ ).contiguous().transpose(0 , 1 ) , ) def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Optional[Any]: # layernorm 1 lowerCamelCase : str = weights[0][0][0] lowerCamelCase : Optional[int] = np.asarray(layer_norm_a[0] ) lowerCamelCase : Tuple = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # lsh weights + output lowerCamelCase : List[Any] = weights[0][1] if len(UpperCamelCase__ ) < 4: set_layer_weights_in_torch_lsh(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) else: set_layer_weights_in_torch_local(UpperCamelCase__ , torch_block.attention , UpperCamelCase__ ) # intermediate weighs lowerCamelCase : int = weights[2][0][1][2] # Chunked Feed Forward if len(UpperCamelCase__ ) == 4: lowerCamelCase : Dict = intermediate_weights[2] # layernorm 2 lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[0][0] ) lowerCamelCase : Tuple = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # intermediate dense lowerCamelCase : 
Optional[Any] = np.asarray(intermediate_weights[1][0] ) lowerCamelCase : Union[str, Any] = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) # intermediate out lowerCamelCase : Optional[int] = np.asarray(intermediate_weights[4][0] ) lowerCamelCase : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int ) -> List[Any]: # reformer model lowerCamelCase : List[Any] = torch_model.reformer # word embeds lowerCamelCase : Union[str, Any] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(UpperCamelCase__ ) , ) if isinstance(weights[3] , UpperCamelCase__ ): lowerCamelCase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): lowerCamelCase : str = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), F'{position_embeddings[emb_idx]} emb does not match' lowerCamelCase : Dict = nn.Parameter(torch.tensor(UpperCamelCase__ ) ) lowerCamelCase : int = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( UpperCamelCase__ ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): lowerCamelCase : Dict = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # output layer norm lowerCamelCase : Any = np.asarray(weights[7][0] ) lowerCamelCase : List[str] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(UpperCamelCase__ ) , torch.tensor(UpperCamelCase__ ) , ) # output embeddings lowerCamelCase : List[Any] = np.asarray(weights[9][0] ) lowerCamelCase : Optional[int] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(UpperCamelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(UpperCamelCase__ ) , ) def snake_case ( UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: # Initialise PyTorch model lowerCamelCase : Union[str, Any] = ReformerConfig.from_json_file(UpperCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) lowerCamelCase : str = ReformerModelWithLMHead(UpperCamelCase__ ) with open(UpperCamelCase__ , """rb""" ) as f: lowerCamelCase : str = pickle.load(UpperCamelCase__ )["""weights"""] set_model_weights_in_torch(UpperCamelCase__ , UpperCamelCase__ , config.hidden_size ) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}' ) torch.save(model.state_dict() , UpperCamelCase__ ) if __name__ == "__main__": __lowerCamelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' 
), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) __lowerCamelCase :Optional[int] = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
42
0
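This row's code field converts a binary string to octal by left-padding to a multiple of three bits and mapping each triple to one octal digit; an equivalent minimal sketch using only the stdlib:

def bin_to_octal(bin_string: str) -> str:
    if not bin_string or any(char not in "01" for char in bin_string):
        raise ValueError("Expected a non-empty string of 0s and 1s")
    # Left-pad with zeros until the length is a multiple of 3.
    bin_string = bin_string.zfill((len(bin_string) + 2) // 3 * 3)
    # int(..., 2) turns each 3-bit group into the matching octal digit.
    return "".join(str(int(bin_string[i : i + 3], 2)) for i in range(0, len(bin_string), 3))

print(bin_to_octal("1111"))  # '17'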
"""simple docstring""" import functools def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Dict ) -> int: if not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for day in days ): raise ValueError("""The parameter days should be a list of integers""" ) if len(__lowerCAmelCase ) != 3 or not all(isinstance(__lowerCAmelCase , __lowerCAmelCase ) for cost in costs ): raise ValueError("""The parameter costs should be a list of three integers""" ) if len(__lowerCAmelCase ) == 0: return 0 if min(__lowerCAmelCase ) <= 0: raise ValueError("""All days elements should be greater than 0""" ) if max(__lowerCAmelCase ) >= 366: raise ValueError("""All days elements should be less than 366""" ) lowerCamelCase : Tuple = set(__lowerCAmelCase ) @functools.cache def dynamic_programming(UpperCamelCase__ : int ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
711
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class A__ ( nn.Module): """simple docstring""" def __init__( self: Dict )-> Dict: super().__init__() lowerCamelCase : Tuple = nn.Linear(3 , 4 ) lowerCamelCase : Optional[Any] = nn.BatchNormad(4 ) lowerCamelCase : Optional[Any] = nn.Linear(4 , 5 ) def a__ ( self: List[str] , __a: List[Any] )-> Optional[Any]: return self.lineara(self.batchnorm(self.lineara(__a ) ) ) class A__ ( __lowercase): """simple docstring""" def a__ ( self: Tuple , __a: int , *__a: Any , **__a: Tuple )-> Tuple: return (args[0] + 1,) + args[1:], kwargs class A__ ( __lowercase): """simple docstring""" def a__ ( self: Optional[int] , __a: List[str] , __a: List[Any] )-> List[str]: return output + 1 class A__ ( unittest.TestCase): """simple docstring""" def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Dict = ModelHook() add_hook_to_module(__a , __a ) self.assertEqual(test_model._hf_hook , __a ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: int )-> str: lowerCamelCase : List[str] = ModelForTest() lowerCamelCase : Union[str, Any] = ModelHook() add_hook_to_module(__a , __a ) add_hook_to_module(__a , __a , append=__a ) self.assertEqual(isinstance(test_model._hf_hook , __a ) , __a ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(__a , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(__a ) self.assertFalse(hasattr(__a , """_hf_hook""" ) ) self.assertFalse(hasattr(__a , """_old_forward""" ) ) def a__ ( self: List[Any] )-> List[str]: lowerCamelCase : str = ModelForTest() lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Union[str, Any] = test_model(x + 1 ) lowerCamelCase : Optional[int] = test_model(x + 2 ) lowerCamelCase : List[Any] = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[int] = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : Dict = PreForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple = test_model(__a ) self.assertTrue(torch.allclose(__a , __a , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Any = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) assert torch.allclose(__a , __a , atol=1e-5 ) def a__ ( self: Any )-> Optional[int]: lowerCamelCase : str = ModelForTest() lowerCamelCase : List[str] = torch.randn(2 , 3 ) lowerCamelCase : int = test_model(__a ) lowerCamelCase : Dict = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Tuple 
= test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowerCamelCase : str = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : Optional[Any] = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowerCamelCase : Union[str, Any] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) assert torch.allclose(__a , output + 2 , atol=1e-5 ) def a__ ( self: int )-> Dict: lowerCamelCase : List[Any] = ModelForTest() lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : List[str] = test_model(__a ) lowerCamelCase : Any = PostForwardHook() add_hook_to_module(__a , __a ) lowerCamelCase : str = test_model(__a ) self.assertTrue(torch.allclose(__a , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowerCamelCase : Optional[int] = True lowerCamelCase : Optional[int] = test_model(__a ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def a__ ( self: List[str] )-> Union[str, Any]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowerCamelCase : str = torch.randn(2 , 3 ) lowerCamelCase : Dict = model(__a ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(__a , AlignDevicesHook(io_same_device=__a ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ).to(0 ) lowerCamelCase : str = model(__a ) self.assertEqual(output.device , torch.device(0 ) ) def a__ ( self: List[str] )-> Tuple: lowerCamelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Tuple = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Optional[Any] = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload lowerCamelCase : Any = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**__a ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**__a ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : int = torch.randn(2 , 3 ) lowerCamelCase : Optional[int] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Any )-> List[str]: lowerCamelCase : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : int = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(__a , execution_device=__a , offload=__a ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : Optional[Any] = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(__a , execution_device=__a , offload=__a , offload_buffers=__a ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Optional[int] = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def a__ ( self: Optional[Any] )-> List[Any]: lowerCamelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices lowerCamelCase : Any = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device lowerCamelCase : List[Any] = torch.device(__a ) self.assertEqual(model.batchnorm.running_mean.device , __a ) lowerCamelCase : Dict = torch.randn(2 , 3 ) lowerCamelCase : int = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( __a , execution_device=__a , offload=__a , weights_map=model.state_dict() , offload_buffers=__a , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) lowerCamelCase : Tuple = torch.randn(2 , 3 ) lowerCamelCase : Any = model(__a ) self.assertEqual(output.device , __a ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(__a ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
42
0
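A minimal sketch of how the hook API exercised above is used outside the test suite. It relies only on the calls the tests themselves demonstrate (ModelHook, add_hook_to_module, remove_hook_from_module); the LoggingHook class and the toy model are illustrative, not part of accelerate:

import torch
import torch.nn as nn

from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class LoggingHook(ModelHook):
    """Print the input shape before each forward pass (illustrative only)."""

    def pre_forward(self, module, *args, **kwargs):
        print(f"forward called with input of shape {args[0].shape}")
        return args, kwargs


model = nn.Linear(3, 4)
add_hook_to_module(model, LoggingHook())
_ = model(torch.randn(2, 3))   # prints the shape, then runs the wrapped forward
remove_hook_from_module(model)  # restores the original forward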
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping __lowerCamelCase :Optional[int] = tuple[int, int] class A__ : """simple docstring""" def __init__( self: Union[str, Any] , __a: List[str] , __a: Union[str, Any] )-> Tuple: lowerCamelCase : set[int] = vertices lowerCamelCase : dict[EdgeT, int] = { (min(_UpperCAmelCase ), max(_UpperCAmelCase )): weight for edge, weight in edges.items() } def a__ ( self: Any , __a: Optional[Any] , __a: str )-> Optional[Any]: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) lowerCamelCase : Any = weight def a__ ( self: Optional[Any] )-> Tuple: lowerCamelCase : Graph = Graph({min(self.vertices )} , {} ) lowerCamelCase : EdgeT lowerCamelCase : int lowerCamelCase : EdgeT lowerCamelCase : int while len(subgraph.vertices ) < len(self.vertices ): lowerCamelCase : List[Any] = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: lowerCamelCase : str = edge lowerCamelCase : int = weight subgraph.add_edge(_UpperCAmelCase , _UpperCAmelCase ) return subgraph def snake_case ( UpperCamelCase__ : str = "p107_network.txt" ) -> int: lowerCamelCase : str = os.path.abspath(os.path.dirname(a_ ) ) lowerCamelCase : str = os.path.join(a_ , a_ ) lowerCamelCase : dict[EdgeT, int] = {} lowerCamelCase : list[str] lowerCamelCase : int lowerCamelCase : int with open(a_ ) as f: lowerCamelCase : Any = f.read().strip().split("""\n""" ) lowerCamelCase : Union[str, Any] = [line.split(""",""" ) for line in data] for edgea in range(1 , len(a_ ) ): for edgea in range(a_ ): if adjaceny_matrix[edgea][edgea] != "-": lowerCamelCase : int = int(adjaceny_matrix[edgea][edgea] ) lowerCamelCase : Graph = Graph(set(range(len(a_ ) ) ) , a_ ) lowerCamelCase : Graph = graph.prims_algorithm() lowerCamelCase : int = sum(graph.edges.values() ) lowerCamelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"""{solution() = }""")
712
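A quick sanity check for the Graph class above, using a hand-built triangle rather than the Project Euler input file. The spanning tree keeps edges (1, 2) and (2, 3) with total weight 3, so dropping the weight-4 edge (1, 3) saves 4:

graph = Graph({1, 2, 3}, {(1, 2): 1, (2, 3): 2, (1, 3): 4})
mst = graph.prims_algorithm()
assert sum(graph.edges.values()) - sum(mst.edges.values()) == 4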
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCamelCase :Optional[Any] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase :Union[str, Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCamelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
42
0
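For context, the _LazyModule pattern in the file above defers the torch-dependent imports until a symbol is actually accessed. A sketch of how downstream code sees it (assuming a transformers installation with torch available; the config defaults are illustrative):

from transformers import EncodecConfig, EncodecModel  # resolved lazily via _import_structure

config = EncodecConfig()      # lightweight config object, no weights loaded yet
model = EncodecModel(config)  # randomly initialised model built from the config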